/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2023 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

struct mcs_event_cb {
        TAILQ_ENTRY(mcs_event_cb) next;
        enum roc_mcs_event_type event;
        roc_mcs_dev_cb_fn cb_fn;
        void *cb_arg;
        void *userdata;
        void *ret_param;
        uint32_t active;
};
TAILQ_HEAD(mcs_event_cb_list, mcs_event_cb);

PLT_STATIC_ASSERT(ROC_MCS_MEM_SZ >= (sizeof(struct mcs_priv) + sizeof(struct mcs_event_cb_list)));

int
roc_mcs_hw_info_get(struct roc_mcs_hw_info *hw_info)
{
        struct mcs_hw_info *hw;
        struct npa_lf *npa;
        int rc;

        MCS_SUPPORT_CHECK;

        if (hw_info == NULL)
                return -EINVAL;

        /* Use the mbox handler of the first probed pci_func for the
         * initial MCS mbox communication.
         */
        npa = idev_npa_obj_get();
        if (!npa)
                return MCS_ERR_DEVICE_NOT_FOUND;

        mbox_alloc_msg_mcs_get_hw_info(npa->mbox);
        rc = mbox_process_msg(npa->mbox, (void *)&hw);
        if (rc)
                return rc;

        hw_info->num_mcs_blks = hw->num_mcs_blks;
        hw_info->tcam_entries = hw->tcam_entries;
        hw_info->secy_entries = hw->secy_entries;
        hw_info->sc_entries = hw->sc_entries;
        hw_info->sa_entries = hw->sa_entries;

        return rc;
}
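
/*
 * Illustrative sketch (not part of the driver): a caller would typically query
 * the MCS capabilities once, before sizing its own MACsec tables. Only fields
 * of struct roc_mcs_hw_info are used; the log format is arbitrary.
 *
 *        struct roc_mcs_hw_info hw_info;
 *        int rc;
 *
 *        rc = roc_mcs_hw_info_get(&hw_info);
 *        if (rc)
 *                return rc;
 *        plt_info("mcs blks=%u tcam=%u secy=%u sc=%u sa=%u", hw_info.num_mcs_blks,
 *                 hw_info.tcam_entries, hw_info.secy_entries, hw_info.sc_entries,
 *                 hw_info.sa_entries);
 */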

int
roc_mcs_active_lmac_set(struct roc_mcs *mcs, struct roc_mcs_set_active_lmac *lmac)
{
        struct mcs_set_active_lmac *req;
        struct msg_rsp *rsp;

        /* Selecting the active LMAC is only needed on CNF10KB (105N) */
        if (!roc_model_is_cnf10kb())
                return 0;

        if (lmac == NULL)
                return -EINVAL;

        MCS_SUPPORT_CHECK;

        req = mbox_alloc_msg_mcs_set_active_lmac(mcs->mbox);
        if (req == NULL)
                return -ENOMEM;

        req->lmac_bmap = lmac->lmac_bmap;
        req->channel_base = lmac->channel_base;
        req->mcs_id = mcs->idx;

        return mbox_process_msg(mcs->mbox, (void *)&rsp);
}

static int
mcs_port_reset_set(struct roc_mcs *mcs, struct roc_mcs_port_reset_req *port, uint8_t reset)
{
        struct mcs_port_reset_req *req;
        struct msg_rsp *rsp;

        MCS_SUPPORT_CHECK;

        req = mbox_alloc_msg_mcs_port_reset(mcs->mbox);
        if (req == NULL)
                return -ENOMEM;

        req->reset = reset;
        req->lmac_id = port->port_id;
        req->mcs_id = mcs->idx;

        return mbox_process_msg(mcs->mbox, (void *)&rsp);
}

int
roc_mcs_lmac_mode_set(struct roc_mcs *mcs, struct roc_mcs_set_lmac_mode *port)
{
        struct mcs_set_lmac_mode *req;
        struct msg_rsp *rsp;

        if (port == NULL)
                return -EINVAL;

        MCS_SUPPORT_CHECK;

        req = mbox_alloc_msg_mcs_set_lmac_mode(mcs->mbox);
        if (req == NULL)
                return -ENOMEM;

        req->lmac_id = port->lmac_id;
        req->mcs_id = mcs->idx;
        req->mode = port->mode;

        return mbox_process_msg(mcs->mbox, (void *)&rsp);
}

int
roc_mcs_pn_threshold_set(struct roc_mcs *mcs, struct roc_mcs_set_pn_threshold *pn)
{
        struct mcs_set_pn_threshold *req;
        struct msg_rsp *rsp;

        if (pn == NULL)
                return -EINVAL;

        MCS_SUPPORT_CHECK;

        req = mbox_alloc_msg_mcs_set_pn_threshold(mcs->mbox);
        if (req == NULL)
                return -ENOMEM;

        req->threshold = pn->threshold;
        req->mcs_id = mcs->idx;
        req->dir = pn->dir;
        req->xpn = pn->xpn;

        return mbox_process_msg(mcs->mbox, (void *)&rsp);
}
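
/*
 * Illustrative sketch (not part of the driver): arm a packet-number threshold
 * so the PN-threshold event fires before a transmit SA exhausts its packet
 * number space. The threshold value is an arbitrary example; xpn selects
 * 64-bit extended packet numbering.
 *
 *        struct roc_mcs_set_pn_threshold pn = {0};
 *
 *        pn.threshold = UINT32_MAX - 1024;
 *        pn.dir = MCS_TX;
 *        pn.xpn = 0;
 *        if (roc_mcs_pn_threshold_set(mcs, &pn))
 *                plt_err("Failed to set PN threshold");
 */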

int
roc_mcs_ctrl_pkt_rule_alloc(struct roc_mcs *mcs, struct roc_mcs_alloc_ctrl_pkt_rule_req *req,
                            struct roc_mcs_alloc_ctrl_pkt_rule_rsp *rsp)
{
        struct mcs_alloc_ctrl_pkt_rule_req *rule_req;
        struct mcs_alloc_ctrl_pkt_rule_rsp *rule_rsp;
        int rc;

        MCS_SUPPORT_CHECK;

        if (req == NULL || rsp == NULL)
                return -EINVAL;

        rule_req = mbox_alloc_msg_mcs_alloc_ctrl_pkt_rule(mcs->mbox);
        if (rule_req == NULL)
                return -ENOMEM;

        rule_req->rule_type = req->rule_type;
        rule_req->mcs_id = mcs->idx;
        rule_req->dir = req->dir;

        rc = mbox_process_msg(mcs->mbox, (void *)&rule_rsp);
        if (rc)
                return rc;

        rsp->rule_type = rule_rsp->rule_type;
        rsp->rule_idx = rule_rsp->rule_idx;
        rsp->dir = rule_rsp->dir;

        return 0;
}

int
roc_mcs_ctrl_pkt_rule_free(struct roc_mcs *mcs, struct roc_mcs_free_ctrl_pkt_rule_req *req)
{
        struct mcs_free_ctrl_pkt_rule_req *rule_req;
        struct msg_rsp *rsp;

        MCS_SUPPORT_CHECK;

        if (req == NULL)
                return -EINVAL;

        rule_req = mbox_alloc_msg_mcs_free_ctrl_pkt_rule(mcs->mbox);
        if (rule_req == NULL)
                return -ENOMEM;

        rule_req->rule_type = req->rule_type;
        rule_req->rule_idx = req->rule_idx;
        rule_req->mcs_id = mcs->idx;
        rule_req->dir = req->dir;
        rule_req->all = req->all;

        return mbox_process_msg(mcs->mbox, (void *)&rsp);
}

int
roc_mcs_ctrl_pkt_rule_write(struct roc_mcs *mcs, struct roc_mcs_ctrl_pkt_rule_write_req *req)
{
        struct mcs_ctrl_pkt_rule_write_req *rule_req;
        struct msg_rsp *rsp;

        MCS_SUPPORT_CHECK;

        if (req == NULL)
                return -EINVAL;

        rule_req = mbox_alloc_msg_mcs_ctrl_pkt_rule_write(mcs->mbox);
        if (rule_req == NULL)
                return -ENOMEM;

        rule_req->rule_type = req->rule_type;
        rule_req->rule_idx = req->rule_idx;
        rule_req->mcs_id = mcs->idx;
        rule_req->dir = req->dir;
        rule_req->data0 = req->data0;
        rule_req->data1 = req->data1;
        rule_req->data2 = req->data2;

        return mbox_process_msg(mcs->mbox, (void *)&rsp);
}
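
/*
 * Illustrative sketch (not part of the driver): control-packet rules are
 * allocated first and then programmed with the allocated index. The rule type
 * and match data below are placeholders; consult the mbox definitions for the
 * exact encoding of data0/data1/data2 per rule type.
 *
 *        struct roc_mcs_alloc_ctrl_pkt_rule_req alloc_req = {0};
 *        struct roc_mcs_alloc_ctrl_pkt_rule_rsp alloc_rsp = {0};
 *        struct roc_mcs_ctrl_pkt_rule_write_req wr_req = {0};
 *        int rc;
 *
 *        alloc_req.rule_type = 0;        // placeholder rule type
 *        alloc_req.dir = MCS_RX;
 *        rc = roc_mcs_ctrl_pkt_rule_alloc(mcs, &alloc_req, &alloc_rsp);
 *        if (rc)
 *                return rc;
 *
 *        wr_req.rule_type = alloc_rsp.rule_type;
 *        wr_req.rule_idx = alloc_rsp.rule_idx;
 *        wr_req.dir = alloc_rsp.dir;
 *        wr_req.data0 = 0;               // placeholder match data
 *        rc = roc_mcs_ctrl_pkt_rule_write(mcs, &wr_req);
 */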

int
roc_mcs_port_cfg_set(struct roc_mcs *mcs, struct roc_mcs_port_cfg_set_req *req)
{
        struct mcs_port_cfg_set_req *set_req;
        struct msg_rsp *rsp;

        MCS_SUPPORT_CHECK;

        if (req == NULL)
                return -EINVAL;

        set_req = mbox_alloc_msg_mcs_port_cfg_set(mcs->mbox);
        if (set_req == NULL)
                return -ENOMEM;

        set_req->cstm_tag_rel_mode_sel = req->cstm_tag_rel_mode_sel;
        set_req->custom_hdr_enb = req->custom_hdr_enb;
        set_req->fifo_skid = req->fifo_skid;
        set_req->lmac_mode = req->port_mode;
        set_req->lmac_id = req->port_id;
        set_req->mcs_id = mcs->idx;

        return mbox_process_msg(mcs->mbox, (void *)&rsp);
}

int
roc_mcs_port_cfg_get(struct roc_mcs *mcs, struct roc_mcs_port_cfg_get_req *req,
                     struct roc_mcs_port_cfg_get_rsp *rsp)
{
        struct mcs_port_cfg_get_req *get_req;
        struct mcs_port_cfg_get_rsp *get_rsp;
        int rc;

        MCS_SUPPORT_CHECK;

        if (req == NULL || rsp == NULL)
                return -EINVAL;

        get_req = mbox_alloc_msg_mcs_port_cfg_get(mcs->mbox);
        if (get_req == NULL)
                return -ENOMEM;

        get_req->lmac_id = req->port_id;
        get_req->mcs_id = mcs->idx;

        rc = mbox_process_msg(mcs->mbox, (void *)&get_rsp);
        if (rc)
                return rc;

        rsp->cstm_tag_rel_mode_sel = get_rsp->cstm_tag_rel_mode_sel;
        rsp->custom_hdr_enb = get_rsp->custom_hdr_enb;
        rsp->fifo_skid = get_rsp->fifo_skid;
        rsp->port_mode = get_rsp->lmac_mode;
        rsp->port_id = get_rsp->lmac_id;

        return 0;
}
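
/*
 * Illustrative sketch (not part of the driver): read-modify-write of a port
 * configuration. Unchanged fields are carried over from the get response;
 * port 0 and the custom-header enable are arbitrary example choices.
 *
 *        struct roc_mcs_port_cfg_get_req get_req = {0};
 *        struct roc_mcs_port_cfg_get_rsp get_rsp = {0};
 *        struct roc_mcs_port_cfg_set_req set_req = {0};
 *        int rc;
 *
 *        get_req.port_id = 0;
 *        rc = roc_mcs_port_cfg_get(mcs, &get_req, &get_rsp);
 *        if (rc)
 *                return rc;
 *
 *        set_req.port_id = get_rsp.port_id;
 *        set_req.port_mode = get_rsp.port_mode;
 *        set_req.fifo_skid = get_rsp.fifo_skid;
 *        set_req.cstm_tag_rel_mode_sel = get_rsp.cstm_tag_rel_mode_sel;
 *        set_req.custom_hdr_enb = 1;
 *        return roc_mcs_port_cfg_set(mcs, &set_req);
 */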

int
roc_mcs_custom_tag_cfg_get(struct roc_mcs *mcs, struct roc_mcs_custom_tag_cfg_get_req *req,
                           struct roc_mcs_custom_tag_cfg_get_rsp *rsp)
{
        struct mcs_custom_tag_cfg_get_req *get_req;
        struct mcs_custom_tag_cfg_get_rsp *get_rsp;
        int i, rc;

        MCS_SUPPORT_CHECK;

        if (req == NULL || rsp == NULL)
                return -EINVAL;

        get_req = mbox_alloc_msg_mcs_custom_tag_cfg_get(mcs->mbox);
        if (get_req == NULL)
                return -ENOMEM;

        get_req->dir = req->dir;
        get_req->mcs_id = mcs->idx;

        rc = mbox_process_msg(mcs->mbox, (void *)&get_rsp);
        if (rc)
                return rc;

        for (i = 0; i < 8; i++) {
                rsp->cstm_etype[i] = get_rsp->cstm_etype[i];
                rsp->cstm_indx[i] = get_rsp->cstm_indx[i];
        }

        rsp->cstm_etype_en = get_rsp->cstm_etype_en;
        rsp->dir = get_rsp->dir;

        return 0;
}
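
/*
 * Illustrative sketch (not part of the driver): dump the RX custom-tag
 * configuration. cstm_etype_en is treated here as a per-entry enable bitmap,
 * which is an assumption about its encoding; the 8-entry bound mirrors the
 * copy loop above.
 *
 *        struct roc_mcs_custom_tag_cfg_get_req req = {0};
 *        struct roc_mcs_custom_tag_cfg_get_rsp rsp = {0};
 *        int i, rc;
 *
 *        req.dir = MCS_RX;
 *        rc = roc_mcs_custom_tag_cfg_get(mcs, &req, &rsp);
 *        if (rc)
 *                return rc;
 *        for (i = 0; i < 8; i++) {
 *                if (rsp.cstm_etype_en & BIT(i))
 *                        plt_info("etype[%d]=0x%x indx=%u", i, rsp.cstm_etype[i],
 *                                 rsp.cstm_indx[i]);
 *        }
 */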

int
roc_mcs_intr_configure(struct roc_mcs *mcs, struct roc_mcs_intr_cfg *config)
{
        struct mcs_intr_cfg *req;
        struct msg_rsp *rsp;
        int rc;

        if (config == NULL)
                return -EINVAL;

        MCS_SUPPORT_CHECK;

        if (mcs->intr_cfg_once)
                return 0;

        req = mbox_alloc_msg_mcs_intr_cfg(mcs->mbox);
        if (req == NULL)
                return -ENOMEM;

        req->intr_mask = config->intr_mask;
        req->mcs_id = mcs->idx;

        rc = mbox_process_msg(mcs->mbox, (void *)&rsp);
        if (rc == 0)
                mcs->intr_cfg_once = true;

        return rc;
}
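
/*
 * Illustrative sketch (not part of the driver): interrupts are configured once
 * per MCS device; later calls are no-ops because of the intr_cfg_once latch.
 * The mask is an OR of the ROC MCS interrupt-event bits the caller intends to
 * handle, taken here from a hypothetical caller-provided variable.
 *
 *        struct roc_mcs_intr_cfg cfg = {0};
 *
 *        cfg.intr_mask = wanted_event_mask;
 *        if (roc_mcs_intr_configure(mcs, &cfg))
 *                plt_err("Failed to configure MCS interrupts");
 */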

int
roc_mcs_port_recovery(struct roc_mcs *mcs, union roc_mcs_event_data *mdata, uint8_t port_id)
{
        struct mcs_priv *priv = roc_mcs_to_mcs_priv(mcs);
        struct roc_mcs_pn_table_write_req pn_table = {0};
        struct roc_mcs_rx_sc_sa_map rx_map = {0};
        struct roc_mcs_tx_sc_sa_map tx_map = {0};
        struct roc_mcs_port_reset_req port = {0};
        struct roc_mcs_clear_stats stats = {0};
        int tx_cnt = 0, rx_cnt = 0, rc = 0;
        uint64_t set;
        int i;

        port.port_id = port_id;
        rc = mcs_port_reset_set(mcs, &port, 1);

        /* Reset TX/RX PN tables */
        for (i = 0; i < (priv->sa_entries << 1); i++) {
                set = plt_bitmap_get(priv->port_rsrc[port_id].sa_bmap, i);
                if (set) {
                        pn_table.pn_id = i;
                        pn_table.next_pn = 1;
                        pn_table.dir = MCS_RX;
                        if (i >= priv->sa_entries) {
                                pn_table.dir = MCS_TX;
                                pn_table.pn_id -= priv->sa_entries;
                        }
                        rc = roc_mcs_pn_table_write(mcs, &pn_table);
                        if (rc)
                                return rc;

                        if (i >= priv->sa_entries)
                                tx_cnt++;
                        else
                                rx_cnt++;
                }
        }

        if (tx_cnt || rx_cnt) {
                mdata->tx_sa_array = plt_zmalloc(tx_cnt * sizeof(uint16_t), 0);
                if (tx_cnt && (mdata->tx_sa_array == NULL)) {
                        rc = -ENOMEM;
                        goto exit;
                }
                mdata->rx_sa_array = plt_zmalloc(rx_cnt * sizeof(uint16_t), 0);
                if (rx_cnt && (mdata->rx_sa_array == NULL)) {
                        rc = -ENOMEM;
                        goto exit;
                }

                mdata->num_tx_sa = tx_cnt;
                mdata->num_rx_sa = rx_cnt;
                for (i = 0; i < (priv->sa_entries << 1); i++) {
                        set = plt_bitmap_get(priv->port_rsrc[port_id].sa_bmap, i);
                        if (set) {
                                if (i >= priv->sa_entries)
                                        mdata->tx_sa_array[--tx_cnt] = i - priv->sa_entries;
                                else
                                        mdata->rx_sa_array[--rx_cnt] = i;
                        }
                }
        }
        tx_cnt = 0;
        rx_cnt = 0;

        /* Reset Tx active SA to index:0 */
        for (i = priv->sc_entries; i < (priv->sc_entries << 1); i++) {
                set = plt_bitmap_get(priv->port_rsrc[port_id].sc_bmap, i);
                if (set) {
                        uint16_t sc_id = i - priv->sc_entries;

                        tx_map.sa_index0 = priv->port_rsrc[port_id].sc_conf[sc_id].tx.sa_idx0;
                        tx_map.sa_index1 = priv->port_rsrc[port_id].sc_conf[sc_id].tx.sa_idx1;
                        tx_map.rekey_ena = priv->port_rsrc[port_id].sc_conf[sc_id].tx.rekey_enb;
                        tx_map.sectag_sci = priv->port_rsrc[port_id].sc_conf[sc_id].tx.sci;
                        tx_map.sa_index0_vld = 1;
                        tx_map.sa_index1_vld = 0;
                        tx_map.tx_sa_active = 0;
                        tx_map.sc_id = sc_id;
                        rc = roc_mcs_tx_sc_sa_map_write(mcs, &tx_map);
                        if (rc)
                                return rc;

                        tx_cnt++;
                }
        }

        if (tx_cnt) {
                mdata->tx_sc_array = plt_zmalloc(tx_cnt * sizeof(uint16_t), 0);
                if (tx_cnt && (mdata->tx_sc_array == NULL)) {
                        rc = -ENOMEM;
                        goto exit;
                }

                mdata->num_tx_sc = tx_cnt;
                for (i = priv->sc_entries; i < (priv->sc_entries << 1); i++) {
                        set = plt_bitmap_get(priv->port_rsrc[port_id].sc_bmap, i);
                        if (set)
                                mdata->tx_sc_array[--tx_cnt] = i - priv->sc_entries;
                }
        }

        /* Clear SA_IN_USE for active ANs in RX CPM */
        for (i = 0; i < priv->sc_entries; i++) {
                set = plt_bitmap_get(priv->port_rsrc[port_id].sc_bmap, i);
                if (set) {
                        rx_map.sa_index = priv->port_rsrc[port_id].sc_conf[i].rx.sa_idx;
                        rx_map.an = priv->port_rsrc[port_id].sc_conf[i].rx.an;
                        rx_map.sa_in_use = 0;
                        rx_map.sc_id = i;
                        rc = roc_mcs_rx_sc_sa_map_write(mcs, &rx_map);
                        if (rc)
                                return rc;

                        rx_cnt++;
                }
        }

        /* Reset the flow (flowid/secy/sc/sa) stats mapped to this port */
        for (i = 0; i < (priv->tcam_entries << 1); i++) {
                set = plt_bitmap_get(priv->port_rsrc[port_id].tcam_bmap, i);
                if (set) {
                        stats.type = MCS_FLOWID_STATS;
                        stats.id = i;
                        stats.dir = MCS_RX;
                        if (i >= priv->tcam_entries) {
                                stats.dir = MCS_TX;
                                stats.id -= priv->tcam_entries;
                        }
                        rc = roc_mcs_stats_clear(mcs, &stats);
                        if (rc)
                                return rc;
                }
        }
        for (i = 0; i < (priv->secy_entries << 1); i++) {
                set = plt_bitmap_get(priv->port_rsrc[port_id].secy_bmap, i);
                if (set) {
                        stats.type = MCS_SECY_STATS;
                        stats.id = i;
                        stats.dir = MCS_RX;
                        if (i >= priv->secy_entries) {
                                stats.dir = MCS_TX;
                                stats.id -= priv->secy_entries;
                        }
                        rc = roc_mcs_stats_clear(mcs, &stats);
                        if (rc)
                                return rc;
                }
        }
        for (i = 0; i < (priv->sc_entries << 1); i++) {
                set = plt_bitmap_get(priv->port_rsrc[port_id].sc_bmap, i);
                if (set) {
                        stats.type = MCS_SC_STATS;
                        stats.id = i;
                        stats.dir = MCS_RX;
                        if (i >= priv->sc_entries) {
                                stats.dir = MCS_TX;
                                stats.id -= priv->sc_entries;
                        }
                        rc = roc_mcs_stats_clear(mcs, &stats);
                        if (rc)
                                return rc;
                }
        }
        if (roc_model_is_cn10kb_a0()) {
                for (i = 0; i < (priv->sa_entries << 1); i++) {
                        set = plt_bitmap_get(priv->port_rsrc[port_id].sa_bmap, i);
                        if (set) {
                                stats.type = MCS_SA_STATS;
                                stats.id = i;
                                stats.dir = MCS_RX;
                                if (i >= priv->sa_entries) {
                                        stats.dir = MCS_TX;
                                        stats.id -= priv->sa_entries;
                                }
                                rc = roc_mcs_stats_clear(mcs, &stats);
                                if (rc)
                                        return rc;
                        }
                }
        }

        stats.type = MCS_PORT_STATS;
        stats.id = port_id;
        rc = roc_mcs_stats_clear(mcs, &stats);
        if (rc)
                return rc;

        if (rx_cnt) {
                mdata->rx_sc_array = plt_zmalloc(rx_cnt * sizeof(uint16_t), 0);
                if (mdata->rx_sc_array == NULL) {
                        rc = -ENOMEM;
                        goto exit;
                }
                mdata->sc_an_array = plt_zmalloc(rx_cnt * sizeof(uint8_t), 0);
                if (mdata->sc_an_array == NULL) {
                        rc = -ENOMEM;
                        goto exit;
                }

                mdata->num_rx_sc = rx_cnt;
        }

        /* Reactivate in-use ANs for active SCs in RX CPM */
        for (i = 0; i < priv->sc_entries; i++) {
                set = plt_bitmap_get(priv->port_rsrc[port_id].sc_bmap, i);
                if (set) {
                        rx_map.sa_index = priv->port_rsrc[port_id].sc_conf[i].rx.sa_idx;
                        rx_map.an = priv->port_rsrc[port_id].sc_conf[i].rx.an;
                        rx_map.sa_in_use = 1;
                        rx_map.sc_id = i;
                        rc = roc_mcs_rx_sc_sa_map_write(mcs, &rx_map);
                        if (rc)
                                return rc;

                        mdata->rx_sc_array[--rx_cnt] = i;
                        mdata->sc_an_array[rx_cnt] = priv->port_rsrc[port_id].sc_conf[i].rx.an;
                }
        }

        port.port_id = port_id;
        rc = mcs_port_reset_set(mcs, &port, 0);

        return rc;
exit:
        if (mdata->num_tx_sa)
                plt_free(mdata->tx_sa_array);
        if (mdata->num_rx_sa)
                plt_free(mdata->rx_sa_array);
        if (mdata->num_tx_sc)
                plt_free(mdata->tx_sc_array);
        if (mdata->num_rx_sc) {
                plt_free(mdata->rx_sc_array);
                plt_free(mdata->sc_an_array);
        }
        return rc;
}

int
roc_mcs_port_reset(struct roc_mcs *mcs, struct roc_mcs_port_reset_req *port)
{
        struct roc_mcs_event_desc desc = {0};
        int rc;

        /* Initiate the port reset and software recovery */
        rc = roc_mcs_port_recovery(mcs, &desc.metadata, port->port_id);
        if (rc)
                goto exit;

        desc.type = ROC_MCS_EVENT_PORT_RESET_RECOVERY;
        /* Notify the application of the entities that were recovered */
        mcs_event_cb_process(mcs, &desc);

exit:
        if (desc.metadata.num_tx_sa)
                plt_free(desc.metadata.tx_sa_array);
        if (desc.metadata.num_rx_sa)
                plt_free(desc.metadata.rx_sa_array);
        if (desc.metadata.num_tx_sc)
                plt_free(desc.metadata.tx_sc_array);
        if (desc.metadata.num_rx_sc) {
                plt_free(desc.metadata.rx_sc_array);
                plt_free(desc.metadata.sc_an_array);
        }

        return rc;
}
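
/*
 * Illustrative sketch (not part of the driver): trigger a port reset with
 * software recovery, e.g. from a link-down handler. Callbacks registered for
 * ROC_MCS_EVENT_PORT_RESET_RECOVERY receive the recovered SA/SC indices via
 * the event metadata.
 *
 *        struct roc_mcs_port_reset_req reset = {0};
 *
 *        reset.port_id = port_id;
 *        if (roc_mcs_port_reset(mcs, &reset))
 *                plt_err("Port %u reset recovery failed", port_id);
 */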

int
roc_mcs_event_cb_register(struct roc_mcs *mcs, enum roc_mcs_event_type event,
                          roc_mcs_dev_cb_fn cb_fn, void *cb_arg, void *userdata)
{
        struct mcs_event_cb_list *cb_list = (struct mcs_event_cb_list *)roc_mcs_to_mcs_cb_list(mcs);
        struct mcs_event_cb *cb;

        if (cb_fn == NULL || cb_arg == NULL || userdata == NULL)
                return -EINVAL;

        MCS_SUPPORT_CHECK;

        TAILQ_FOREACH(cb, cb_list, next) {
                if (cb->cb_fn == cb_fn && cb->cb_arg == cb_arg && cb->event == event)
                        break;
        }

        if (cb == NULL) {
                cb = plt_zmalloc(sizeof(struct mcs_event_cb), 0);
                if (!cb)
                        return -ENOMEM;

                cb->cb_fn = cb_fn;
                cb->cb_arg = cb_arg;
                cb->event = event;
                cb->userdata = userdata;
                TAILQ_INSERT_TAIL(cb_list, cb, next);
        }

        return 0;
}
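
/*
 * Illustrative sketch (not part of the driver): register a handler for MACsec
 * events. The handler shape below mirrors the four-argument call made from
 * mcs_event_cb_process() further down; the authoritative prototype is the
 * roc_mcs_dev_cb_fn typedef in roc_mcs.h. cb_arg and userdata must be
 * non-NULL, since the register call rejects NULL values.
 *
 *        static int
 *        macsec_event_handler(void *userdata, struct roc_mcs_event_desc *desc, void *cb_arg,
 *                             uint8_t port_id)
 *        {
 *                PLT_SET_USED(userdata);
 *                PLT_SET_USED(cb_arg);
 *                plt_info("MCS event %d on port %u", desc->type, port_id);
 *                return 0;
 *        }
 *
 *        rc = roc_mcs_event_cb_register(mcs, ROC_MCS_EVENT_PORT_RESET_RECOVERY,
 *                                       macsec_event_handler, cb_arg, userdata);
 */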

int
roc_mcs_event_cb_unregister(struct roc_mcs *mcs, enum roc_mcs_event_type event)
{
        struct mcs_event_cb_list *cb_list = (struct mcs_event_cb_list *)roc_mcs_to_mcs_cb_list(mcs);
        struct mcs_event_cb *cb, *next;

        MCS_SUPPORT_CHECK;

        for (cb = TAILQ_FIRST(cb_list); cb != NULL; cb = next) {
                next = TAILQ_NEXT(cb, next);

                if (cb->event != event)
                        continue;

                if (cb->active == 0) {
                        TAILQ_REMOVE(cb_list, cb, next);
                        plt_free(cb);
                } else {
                        return -EAGAIN;
                }
        }

        return 0;
}

int
mcs_event_cb_process(struct roc_mcs *mcs, struct roc_mcs_event_desc *desc)
{
        struct mcs_event_cb_list *cb_list = (struct mcs_event_cb_list *)roc_mcs_to_mcs_cb_list(mcs);
        struct mcs_event_cb mcs_cb;
        struct mcs_event_cb *cb;
        int rc = 0;

        TAILQ_FOREACH(cb, cb_list, next) {
                if (cb->cb_fn == NULL || cb->event != desc->type)
                        continue;

                mcs_cb = *cb;
                cb->active = 1;
                mcs_cb.ret_param = desc;

                rc = mcs_cb.cb_fn(mcs_cb.userdata, mcs_cb.ret_param, mcs_cb.cb_arg,
                                  mcs->sa_port_map[desc->metadata.sa_idx]);
                cb->active = 0;
        }

        return rc;
}

static int
mcs_alloc_bmap(uint16_t entries, void **mem, struct plt_bitmap **bmap)
{
        size_t bmap_sz;
        int rc = 0;

        bmap_sz = plt_bitmap_get_memory_footprint(entries);
        *mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
        if (*mem == NULL)
                return -ENOMEM;

        *bmap = plt_bitmap_init(entries, *mem, bmap_sz);
        if (*bmap == NULL) {
                plt_free(*mem);
                *mem = NULL;
                rc = -ENOMEM;
        }

        return rc;
}
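
/*
 * The bitmaps allocated by mcs_alloc_bmap() track which TCAM/SECY/SC/SA
 * entries are owned by the device or by a port. Illustrative sketch (not part
 * of the driver) of the set/test/clear pattern used throughout this file; the
 * entry count of 128 is an arbitrary example.
 *
 *        struct plt_bitmap *bmap;
 *        void *mem;
 *
 *        if (mcs_alloc_bmap(128, &mem, &bmap))
 *                return -ENOMEM;
 *        plt_bitmap_set(bmap, 5);
 *        if (plt_bitmap_get(bmap, 5))
 *                plt_bitmap_clear(bmap, 5);
 *        plt_bitmap_free(bmap);
 *        plt_free(mem);
 */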

static void
rsrc_bmap_free(struct mcs_rsrc *rsrc)
{
        plt_bitmap_free(rsrc->tcam_bmap);
        plt_free(rsrc->tcam_bmap_mem);
        plt_bitmap_free(rsrc->secy_bmap);
        plt_free(rsrc->secy_bmap_mem);
        plt_bitmap_free(rsrc->sc_bmap);
        plt_free(rsrc->sc_bmap_mem);
        plt_bitmap_free(rsrc->sa_bmap);
        plt_free(rsrc->sa_bmap_mem);
}

static int
rsrc_bmap_alloc(struct mcs_priv *priv, struct mcs_rsrc *rsrc)
{
        int rc;

        rc = mcs_alloc_bmap(priv->tcam_entries << 1, &rsrc->tcam_bmap_mem, &rsrc->tcam_bmap);
        if (rc)
                goto exit;

        rc = mcs_alloc_bmap(priv->secy_entries << 1, &rsrc->secy_bmap_mem, &rsrc->secy_bmap);
        if (rc)
                goto exit;

        rc = mcs_alloc_bmap(priv->sc_entries << 1, &rsrc->sc_bmap_mem, &rsrc->sc_bmap);
        if (rc)
                goto exit;

        rc = mcs_alloc_bmap(priv->sa_entries << 1, &rsrc->sa_bmap_mem, &rsrc->sa_bmap);
        if (rc)
                goto exit;

        return rc;
exit:
        rsrc_bmap_free(rsrc);

        return rc;
}

static int
mcs_alloc_rsrc_bmap(struct roc_mcs *mcs)
{
        struct mcs_priv *priv = roc_mcs_to_mcs_priv(mcs);
        struct mcs_hw_info *hw;
        int i, rc;

        mbox_alloc_msg_mcs_get_hw_info(mcs->mbox);
        rc = mbox_process_msg(mcs->mbox, (void *)&hw);
        if (rc)
                return rc;

        priv->num_mcs_blks = hw->num_mcs_blks;
        priv->tcam_entries = hw->tcam_entries;
        priv->secy_entries = hw->secy_entries;
        priv->sc_entries = hw->sc_entries;
        priv->sa_entries = hw->sa_entries;

        rc = rsrc_bmap_alloc(priv, &priv->dev_rsrc);
        if (rc)
                return rc;

        priv->port_rsrc = plt_zmalloc(sizeof(struct mcs_rsrc) * MAX_PORTS_PER_MCS, 0);
        if (priv->port_rsrc == NULL) {
                rsrc_bmap_free(&priv->dev_rsrc);
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PORTS_PER_MCS; i++) {
                rc = rsrc_bmap_alloc(priv, &priv->port_rsrc[i]);
                if (rc)
                        goto exit;

                priv->port_rsrc[i].sc_conf =
                        plt_zmalloc(priv->sc_entries * sizeof(struct mcs_sc_conf), 0);
                if (priv->port_rsrc[i].sc_conf == NULL) {
                        rsrc_bmap_free(&priv->port_rsrc[i]);
                        goto exit;
                }
        }

        mcs->sa_port_map = plt_zmalloc(sizeof(uint8_t) * hw->sa_entries, 0);
        if (mcs->sa_port_map == NULL)
                goto exit;

        return rc;

exit:
        while (i--) {
                rsrc_bmap_free(&priv->port_rsrc[i]);
                plt_free(priv->port_rsrc[i].sc_conf);
        }
        plt_free(priv->port_rsrc);

        return -ENOMEM;
}

struct roc_mcs *
roc_mcs_dev_init(uint8_t mcs_idx)
{
        struct mcs_event_cb_list *cb_list;
        struct roc_mcs *mcs;
        struct npa_lf *npa;

        if (!(roc_feature_bphy_has_macsec() || roc_feature_nix_has_macsec()))
                return NULL;

        mcs = roc_idev_mcs_get(mcs_idx);
        if (mcs) {
                plt_info("Skipping device, mcs device already probed");
                mcs->refcount++;
                return mcs;
        }

        mcs = plt_zmalloc(sizeof(struct roc_mcs), PLT_CACHE_LINE_SIZE);
        if (!mcs)
                return NULL;

        npa = idev_npa_obj_get();
        if (!npa)
                goto exit;

        mcs->mbox = npa->mbox;
        mcs->idx = mcs_idx;

        /* Add any per-MCS initialization here */
        if (mcs_alloc_rsrc_bmap(mcs))
                goto exit;

        cb_list = (struct mcs_event_cb_list *)roc_mcs_to_mcs_cb_list(mcs);
        TAILQ_INIT(cb_list);

        roc_idev_mcs_set(mcs);
        mcs->refcount++;

        return mcs;
exit:
        plt_free(mcs);
        return NULL;
}
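
/*
 * Illustrative sketch (not part of the driver): typical bring-up and teardown
 * order around the rest of this API. mcs_idx selects the MCS block; 0 is used
 * here as an arbitrary example.
 *
 *        struct roc_mcs *mcs;
 *
 *        mcs = roc_mcs_dev_init(0);
 *        if (mcs == NULL)
 *                return -ENODEV;
 *        ... configure interrupts, control-packet rules, SECY/SC/SA entries ...
 *        roc_mcs_dev_fini(mcs);
 */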

void
roc_mcs_dev_fini(struct roc_mcs *mcs)
{
        struct mcs_priv *priv;
        int i;

        mcs->refcount--;
        if (mcs->refcount > 0)
                return;

        priv = roc_mcs_to_mcs_priv(mcs);

        rsrc_bmap_free(&priv->dev_rsrc);

        for (i = 0; i < MAX_PORTS_PER_MCS; i++) {
                rsrc_bmap_free(&priv->port_rsrc[i]);
                plt_free(priv->port_rsrc[i].sc_conf);
        }

        plt_free(priv->port_rsrc);

        plt_free(mcs->sa_port_map);

        roc_idev_mcs_free(mcs);

        plt_free(mcs);
}