xref: /dpdk/drivers/common/cnxk/roc_npc_mcam.c (revision 296e9040e2c15b424cf716bfdde0f5257c17a9fd)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include "roc_api.h"
5 #include "roc_priv.h"
6 
7 int
npc_mcam_alloc_counter(struct mbox * mbox,uint16_t * ctr)8 npc_mcam_alloc_counter(struct mbox *mbox, uint16_t *ctr)
9 {
10 	struct npc_mcam_alloc_counter_req *req;
11 	struct npc_mcam_alloc_counter_rsp *rsp;
12 	int rc = -ENOSPC;
13 
14 	req = mbox_alloc_msg_npc_mcam_alloc_counter(mbox_get(mbox));
15 	if (req == NULL)
16 		goto exit;
17 	req->count = 1;
18 	rc = mbox_process_msg(mbox, (void *)&rsp);
19 	if (rc)
20 		goto exit;
21 	*ctr = rsp->cntr_list[0];
22 exit:
23 	mbox_put(mbox);
24 	return rc;
25 }
26 
27 int
npc_mcam_free_counter(struct mbox * mbox,uint16_t ctr_id)28 npc_mcam_free_counter(struct mbox *mbox, uint16_t ctr_id)
29 {
30 	struct npc_mcam_oper_counter_req *req;
31 	int rc = -ENOSPC;
32 
33 	req = mbox_alloc_msg_npc_mcam_free_counter(mbox_get(mbox));
34 	if (req == NULL)
35 		goto exit;
36 	req->cntr = ctr_id;
37 	rc = mbox_process(mbox);
38 exit:
39 	mbox_put(mbox);
40 	return rc;
41 }
42 
43 int
npc_mcam_read_counter(struct mbox * mbox,uint32_t ctr_id,uint64_t * count)44 npc_mcam_read_counter(struct mbox *mbox, uint32_t ctr_id, uint64_t *count)
45 {
46 	struct npc_mcam_oper_counter_req *req;
47 	struct npc_mcam_oper_counter_rsp *rsp;
48 	int rc = -ENOSPC;
49 
50 	req = mbox_alloc_msg_npc_mcam_counter_stats(mbox_get(mbox));
51 	if (req == NULL)
52 		goto exit;
53 	req->cntr = ctr_id;
54 	rc = mbox_process_msg(mbox, (void *)&rsp);
55 	if (rc)
56 		goto exit;
57 	*count = rsp->stat;
58 exit:
59 	mbox_put(mbox);
60 	return rc;
61 }
62 
63 int
npc_mcam_clear_counter(struct mbox * mbox,uint32_t ctr_id)64 npc_mcam_clear_counter(struct mbox *mbox, uint32_t ctr_id)
65 {
66 	struct npc_mcam_oper_counter_req *req;
67 	int rc = -ENOSPC;
68 
69 	req = mbox_alloc_msg_npc_mcam_clear_counter(mbox_get(mbox));
70 	if (req == NULL)
71 		goto exit;
72 	req->cntr = ctr_id;
73 	rc = mbox_process(mbox);
74 exit:
75 	mbox_put(mbox);
76 	return rc;
77 }
78 
79 int
npc_mcam_free_entry(struct mbox * mbox,uint32_t entry)80 npc_mcam_free_entry(struct mbox *mbox, uint32_t entry)
81 {
82 	struct npc_mcam_free_entry_req *req;
83 	int rc = -ENOSPC;
84 
85 	req = mbox_alloc_msg_npc_mcam_free_entry(mbox_get(mbox));
86 	if (req == NULL)
87 		goto exit;
88 	req->entry = entry;
89 	rc = mbox_process(mbox);
90 exit:
91 	mbox_put(mbox);
92 	return rc;
93 }
94 
95 int
npc_mcam_free_all_entries(struct npc * npc)96 npc_mcam_free_all_entries(struct npc *npc)
97 {
98 	struct npc_mcam_free_entry_req *req;
99 	struct mbox *mbox = mbox_get(npc->mbox);
100 	int rc = -ENOSPC;
101 
102 	req = mbox_alloc_msg_npc_mcam_free_entry(mbox);
103 	if (req == NULL)
104 		goto exit;
105 	req->all = 1;
106 	rc = mbox_process(mbox);
107 exit:
108 	mbox_put(mbox);
109 	return rc;
110 }
111 
static int
npc_supp_key_len(uint32_t supp_mask)
{
	int nibbles;

	/* Count set bits (Kernighan's trick); each supported bit in the
	 * KEX mask contributes one nibble (4 bits) to the search key.
	 */
	for (nibbles = 0; supp_mask != 0; nibbles++)
		supp_mask &= supp_mask - 1;

	return nibbles * 4;
}
123 
124 /**
125  * Returns true if any LDATA bits are extracted for specific LID+LTYPE.
126  *
127  * No LFLAG extraction is taken into account.
128  */
129 static int
npc_lid_lt_in_kex(struct npc * npc,uint8_t lid,uint8_t lt)130 npc_lid_lt_in_kex(struct npc *npc, uint8_t lid, uint8_t lt)
131 {
132 	struct npc_xtract_info *x_info;
133 	int i;
134 
135 	for (i = 0; i < NPC_MAX_LD; i++) {
136 		x_info = &npc->prx_dxcfg[NIX_INTF_RX][lid][lt].xtract[i];
137 		/* Check for LDATA */
138 		if (x_info->enable && x_info->len > 0)
139 			return true;
140 	}
141 
142 	return false;
143 }
144 
/* Mark in 'bmap' every header bit that the KEX profile extracts via LDATA
 * entry 'ld' for the given LID+LTYPE, then (if flags extraction is enabled
 * and this LID is the configured flag layer) also mark the LFLAG-based
 * extractions. Bitmap positions are header byte offsets expressed in bits.
 */
static void
npc_construct_ldata_mask(struct npc *npc, struct plt_bitmap *bmap, uint8_t lid, uint8_t lt,
			 uint8_t ld)
{
	struct npc_xtract_info *x_info, *infoflag;
	int hdr_off, keylen;
	npc_dxcfg_t *p;
	npc_fxcfg_t *q;
	int i, j;

	p = &npc->prx_dxcfg;
	/* Intf 0 (RX) LDATA extract descriptor for this LID+LTYPE. */
	x_info = &(*p)[0][lid][lt].xtract[ld];

	if (x_info->enable == 0)
		return;

	/* One bitmap bit per extracted header bit. */
	hdr_off = x_info->hdr_off * 8;
	keylen = x_info->len * 8;
	for (i = hdr_off; i < (hdr_off + keylen); i++)
		plt_bitmap_set(bmap, i);

	if (x_info->flags_enable == 0)
		return;

	/* LFLAG extraction only applies when this LID is the layer selected
	 * in the low 3 bits of KEX_LD_FLAGS[0].
	 */
	if ((npc->prx_lfcfg[0].i & 0x7) != lid)
		return;

	q = &npc->prx_fxcfg;
	for (j = 0; j < NPC_MAX_LFL; j++) {
		/* NOTE(review): second index here is 'ld', not a flag-layer
		 * index — assumed intentional (per-LD flag config); confirm
		 * against npc_fxcfg_t layout.
		 */
		infoflag = &(*q)[0][ld][j].xtract[0];
		if (infoflag->enable) {
			hdr_off = infoflag->hdr_off * 8;
			keylen = infoflag->len * 8;
			for (i = hdr_off; i < (hdr_off + keylen); i++)
				plt_bitmap_set(bmap, i);
		}
	}
}
183 
184 /**
185  * Check if given LID+LTYPE combination is present in KEX
186  *
187  * len is non-zero, this function will return true if KEX extracts len bytes
188  * at given offset. Otherwise it'll return true if any bytes are extracted
189  * specifically for given LID+LTYPE combination (meaning not LFLAG based).
190  * The second case increases flexibility for custom frames whose extracted
191  * bits may change depending on KEX profile loaded.
192  *
193  * @param npc NPC context structure
194  * @param lid Layer ID to check for
195  * @param lt Layer Type to check for
196  * @param offset offset into the layer header to match
197  * @param len length of the match
198  */
199 static bool
npc_is_kex_enabled(struct npc * npc,uint8_t lid,uint8_t lt,int offset,int len)200 npc_is_kex_enabled(struct npc *npc, uint8_t lid, uint8_t lt, int offset, int len)
201 {
202 	struct plt_bitmap *bmap;
203 	uint32_t bmap_sz;
204 	uint8_t *mem;
205 	int i;
206 
207 	if (!len)
208 		return npc_lid_lt_in_kex(npc, lid, lt);
209 
210 	bmap_sz = plt_bitmap_get_memory_footprint(300 * 8);
211 	mem = plt_zmalloc(bmap_sz, 0);
212 	if (mem == NULL) {
213 		plt_err("mem alloc failed");
214 		return false;
215 	}
216 	bmap = plt_bitmap_init(300 * 8, mem, bmap_sz);
217 	if (bmap == NULL) {
218 		plt_err("mem alloc failed");
219 		plt_free(mem);
220 		return false;
221 	}
222 
223 	npc_construct_ldata_mask(npc, bmap, lid, lt, 0);
224 	npc_construct_ldata_mask(npc, bmap, lid, lt, 1);
225 
226 	for (i = offset; i < (offset + len); i++) {
227 		if (plt_bitmap_get(bmap, i) != 0x1) {
228 			plt_free(mem);
229 			return false;
230 		}
231 	}
232 
233 	plt_free(mem);
234 	return true;
235 }
236 
/* Build the capability bitmap advertising which flow-item matches the
 * currently loaded KEX profile can support. Each bit is set only if the
 * profile extracts the exact header window (offset/len, in bits) that the
 * corresponding match needs.
 */
uint64_t
npc_get_kex_capability(struct npc *npc)
{
	npc_kex_cap_terms_t kex_cap;

	memset(&kex_cap, 0, sizeof(kex_cap));

	/* Ethtype: Offset 12B, len 2B */
	kex_cap.bit.ethtype_0 = npc_is_kex_enabled(npc, NPC_LID_LA, NPC_LT_LA_ETHER, 12 * 8, 2 * 8);
	/* QINQ VLAN Ethtype: offset 8B, len 2B */
	kex_cap.bit.ethtype_x =
		npc_is_kex_enabled(npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8 * 8, 2 * 8);
	/* VLAN ID0 : Outer VLAN: Offset 2B, len 2B */
	kex_cap.bit.vlan_id_0 = npc_is_kex_enabled(npc, NPC_LID_LB, NPC_LT_LB_CTAG, 2 * 8, 2 * 8);
	/* VLAN PCP0 : Outer VLAN: Offset 2B, len 1B */
	kex_cap.bit.vlan_pcp_0 = npc_is_kex_enabled(npc, NPC_LID_LB, NPC_LT_LB_CTAG, 2 * 8, 2 * 1);
	/* VLAN IDX : Inner VLAN: offset 6B, len 2B */
	kex_cap.bit.vlan_id_x =
		npc_is_kex_enabled(npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6 * 8, 2 * 8);
	/* DMCA: offset 0B, len 6B */
	kex_cap.bit.dmac = npc_is_kex_enabled(npc, NPC_LID_LA, NPC_LT_LA_ETHER, 0 * 8, 6 * 8);
	/* IP proto: offset 9B, len 1B */
	kex_cap.bit.ip_proto = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP, 9 * 8, 1 * 8);
	/* IPv4 dscp: offset 1B, len 1B, IPv6 dscp: offset 0B, len 2B */
	kex_cap.bit.ip_dscp = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP, 1 * 8, 1 * 8) &&
			      npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP6, 0, 2 * 8);
	/* UDP dport: offset 2B, len 2B */
	kex_cap.bit.udp_dport = npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_UDP, 2 * 8, 2 * 8);
	/* UDP sport: offset 0B, len 2B */
	kex_cap.bit.udp_sport = npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_UDP, 0 * 8, 2 * 8);
	/* TCP dport: offset 2B, len 2B */
	kex_cap.bit.tcp_dport = npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_TCP, 2 * 8, 2 * 8);
	/* TCP sport: offset 0B, len 2B */
	kex_cap.bit.tcp_sport = npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_TCP, 0 * 8, 2 * 8);
	/* IP SIP: offset 12B, len 4B */
	kex_cap.bit.sip_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP, 12 * 8, 4 * 8);
	/* IP DIP: offset 14B, len 4B */
	kex_cap.bit.dip_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP, 14 * 8, 4 * 8);
	/* IP6 SIP: offset 8B, len 16B */
	kex_cap.bit.sip6_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP6, 8 * 8, 16 * 8);
	/* IP6 DIP: offset 24B, len 16B */
	kex_cap.bit.dip6_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP6, 24 * 8, 16 * 8);
	/* ESP SPI: offset 0B, len 4B */
	kex_cap.bit.ipsec_spi = npc_is_kex_enabled(npc, NPC_LID_LE, NPC_LT_LE_ESP, 0 * 8, 4 * 8);
	/* VXLAN VNI: offset 4B, len 3B */
	kex_cap.bit.ld_vni = npc_is_kex_enabled(npc, NPC_LID_LE, NPC_LT_LE_VXLAN, 0 * 8, 3 * 8);

	/* Custom L3 frame: varied offset and lengths; len 0 means "any
	 * LDATA extraction for this LID+LTYPE is good enough".
	 */
	kex_cap.bit.custom_l3 = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_CUSTOM0, 0, 0);
	kex_cap.bit.custom_l3 |=
		(uint64_t)npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_CUSTOM1, 0, 0);
	/* SCTP sport : offset 0B, len 2B */
	kex_cap.bit.sctp_sport = npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_SCTP, 0 * 8, 2 * 8);
	/* SCTP dport : offset 2B, len 2B */
	kex_cap.bit.sctp_dport = npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_SCTP, 2 * 8, 2 * 8);
	/* ICMP type : offset 0B, len 1B */
	kex_cap.bit.icmp_type = npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_ICMP, 0 * 8, 1 * 8);
	/* ICMP code : offset 1B, len 1B */
	kex_cap.bit.icmp_code = npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_ICMP, 1 * 8, 1 * 8);
	/* ICMP id : offset 4B, len 2B */
	kex_cap.bit.icmp_id = npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_ICMP, 4 * 8, 2 * 8);
	/* IGMP grp_addr : offset 4B, len 4B */
	kex_cap.bit.igmp_grp_addr =
		npc_is_kex_enabled(npc, NPC_LID_LD, NPC_LT_LD_IGMP, 4 * 8, 4 * 8);
	/* GTPU teid : offset 4B, len 4B */
	kex_cap.bit.gtpv1_teid = npc_is_kex_enabled(npc, NPC_LID_LE, NPC_LT_LE_GTPU, 4 * 8, 4 * 8);
	return kex_cap.all_bits;
}
305 
306 #define BYTESM1_SHIFT 16
307 #define HDR_OFF_SHIFT 8
308 static void
npc_update_kex_info(struct npc_xtract_info * xtract_info,uint64_t val)309 npc_update_kex_info(struct npc_xtract_info *xtract_info, uint64_t val)
310 {
311 	xtract_info->use_hash = ((val >> 20) & 0x1);
312 	xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
313 	xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
314 	xtract_info->key_off = val & 0x3f;
315 	xtract_info->enable = ((val >> 7) & 0x1);
316 	xtract_info->flags_enable = ((val >> 6) & 0x1);
317 }
318 
319 int
npc_mcam_alloc_entries(struct mbox * mbox,int ref_mcam,int * alloc_entry,int req_count,int prio,int * resp_count,bool is_conti)320 npc_mcam_alloc_entries(struct mbox *mbox, int ref_mcam, int *alloc_entry, int req_count, int prio,
321 		       int *resp_count, bool is_conti)
322 {
323 	struct npc_mcam_alloc_entry_req *req;
324 	struct npc_mcam_alloc_entry_rsp *rsp;
325 	int rc = -ENOSPC;
326 	int i;
327 
328 	req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox_get(mbox));
329 	if (req == NULL)
330 		goto exit;
331 	req->contig = is_conti;
332 	req->count = req_count;
333 	req->priority = prio;
334 	req->ref_entry = ref_mcam;
335 
336 	rc = mbox_process_msg(mbox, (void *)&rsp);
337 	if (rc)
338 		goto exit;
339 	for (i = 0; i < rsp->count; i++)
340 		alloc_entry[i] = rsp->entry_list[i];
341 	*resp_count = rsp->count;
342 	if (is_conti)
343 		alloc_entry[0] = rsp->entry;
344 	rc = 0;
345 exit:
346 	mbox_put(mbox);
347 	return rc;
348 }
349 
350 int
npc_mcam_alloc_entry(struct npc * npc,struct roc_npc_flow * mcam,struct roc_npc_flow * ref_mcam,int prio,int * resp_count)351 npc_mcam_alloc_entry(struct npc *npc, struct roc_npc_flow *mcam, struct roc_npc_flow *ref_mcam,
352 		     int prio, int *resp_count)
353 {
354 	struct npc_mcam_alloc_entry_req *req;
355 	struct npc_mcam_alloc_entry_rsp *rsp;
356 	struct mbox *mbox = mbox_get(npc->mbox);
357 	int rc = -ENOSPC;
358 
359 	req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
360 	if (req == NULL)
361 		goto exit;
362 	req->contig = 1;
363 	req->count = 1;
364 	req->priority = prio;
365 	req->ref_entry = ref_mcam->mcam_id;
366 
367 	rc = mbox_process_msg(mbox, (void *)&rsp);
368 	if (rc)
369 		goto exit;
370 	memset(mcam, 0, sizeof(struct roc_npc_flow));
371 	mcam->mcam_id = rsp->entry;
372 	mcam->nix_intf = ref_mcam->nix_intf;
373 	*resp_count = rsp->count;
374 	rc = 0;
375 exit:
376 	mbox_put(mbox);
377 	return rc;
378 }
379 
380 int
npc_mcam_ena_dis_entry(struct npc * npc,struct roc_npc_flow * mcam,bool enable)381 npc_mcam_ena_dis_entry(struct npc *npc, struct roc_npc_flow *mcam, bool enable)
382 {
383 	struct npc_mcam_ena_dis_entry_req *req;
384 	struct mbox *mbox = mbox_get(npc->mbox);
385 	int rc = -ENOSPC;
386 
387 	if (enable)
388 		req = mbox_alloc_msg_npc_mcam_ena_entry(mbox);
389 	else
390 		req = mbox_alloc_msg_npc_mcam_dis_entry(mbox);
391 
392 	if (req == NULL)
393 		goto exit;
394 	req->entry = mcam->mcam_id;
395 	mcam->enable = enable;
396 	rc = mbox_process(mbox);
397 exit:
398 	mbox_put(mbox);
399 	return rc;
400 }
401 
402 int
npc_mcam_write_entry(struct mbox * mbox,struct roc_npc_flow * mcam)403 npc_mcam_write_entry(struct mbox *mbox, struct roc_npc_flow *mcam)
404 {
405 	struct npc_mcam_write_entry_req *req;
406 	struct mbox_msghdr *rsp;
407 	int rc = -ENOSPC;
408 	uint16_t ctr = 0;
409 	int i;
410 
411 	if (mcam->use_ctr && mcam->ctr_id == NPC_COUNTER_NONE) {
412 		rc = npc_mcam_alloc_counter(mbox, &ctr);
413 		if (rc)
414 			return rc;
415 		mcam->ctr_id = ctr;
416 
417 		rc = npc_mcam_clear_counter(mbox, mcam->ctr_id);
418 		if (rc)
419 			return rc;
420 	}
421 
422 	req = mbox_alloc_msg_npc_mcam_write_entry(mbox_get(mbox));
423 	if (req == NULL) {
424 		mbox_put(mbox);
425 		if (mcam->use_ctr)
426 			npc_mcam_free_counter(mbox, ctr);
427 
428 		return rc;
429 	}
430 	req->entry = mcam->mcam_id;
431 	req->intf = mcam->nix_intf;
432 	req->enable_entry = mcam->enable;
433 	req->entry_data.action = mcam->npc_action;
434 	req->entry_data.vtag_action = mcam->vtag_action;
435 	if (mcam->use_ctr) {
436 		req->set_cntr = 1;
437 		req->cntr = mcam->ctr_id;
438 	}
439 
440 	for (i = 0; i < NPC_MCAM_KEY_X4_WORDS; i++) {
441 		req->entry_data.kw[i] = mcam->mcam_data[i];
442 		req->entry_data.kw_mask[i] = mcam->mcam_mask[i];
443 	}
444 	rc = mbox_process_msg(mbox, (void *)&rsp);
445 	mbox_put(mbox);
446 	return rc;
447 }
448 
/* Cache the AF's KEX (key extraction) profile response into the local npc
 * state: supported-nibble masks, key lengths/widths, per-interface LDATA
 * extract tables and LFLAG extract tables.
 */
static void
npc_mcam_process_mkex_cfg(struct npc *npc, struct npc_get_kex_cfg_rsp *kex_rsp)
{
	volatile uint64_t(*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
	struct npc_xtract_info *x_info = NULL;
	int lid, lt, ld, fl, ix;
	npc_dxcfg_t *p;
	uint64_t keyw;
	uint64_t val;

	/* Low 31 bits of each key config are the nibble-support mask. */
	npc->keyx_supp_nmask[NPC_MCAM_RX] = kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_supp_nmask[NPC_MCAM_TX] = kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
	npc->keyx_len[NPC_MCAM_RX] = npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
	npc->keyx_len[NPC_MCAM_TX] = npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);

	/* Bits [34:32] encode the key width selection. */
	keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_RX] = keyw;
	keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
	npc->keyw[NPC_MCAM_TX] = keyw;

	/* Update KEX_LD_FLAG */
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (ld = 0; ld < NPC_MAX_LD; ld++) {
			for (fl = 0; fl < NPC_MAX_LFL; fl++) {
				x_info = &npc->prx_fxcfg[ix][ld][fl].xtract[0];
				val = kex_rsp->intf_ld_flags[ix][ld][fl];
				npc_update_kex_info(x_info, val);
			}
		}
	}

	/* Update LID, LT and LDATA cfg */
	p = &npc->prx_dxcfg;
	/* Reinterpret the flat response buffer as the 4-D config array. */
	q = (volatile uint64_t(*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])(&kex_rsp->intf_lid_lt_ld);
	for (ix = 0; ix < NPC_MAX_INTF; ix++) {
		for (lid = 0; lid < NPC_MAX_LID; lid++) {
			for (lt = 0; lt < NPC_MAX_LT; lt++) {
				for (ld = 0; ld < NPC_MAX_LD; ld++) {
					x_info = &(*p)[ix][lid][lt].xtract[ld];
					val = (*q)[ix][lid][lt][ld];
					npc_update_kex_info(x_info, val);
				}
			}
		}
	}
	/* Update LDATA Flags cfg */
	npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
	npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
}
498 
499 int
npc_mcam_fetch_hw_cap(struct npc * npc,uint8_t * npc_hw_cap)500 npc_mcam_fetch_hw_cap(struct npc *npc, uint8_t *npc_hw_cap)
501 {
502 	struct get_hw_cap_rsp *hw_cap_rsp;
503 	struct mbox *mbox = mbox_get(npc->mbox);
504 	int rc = 0;
505 
506 	*npc_hw_cap = 0;
507 
508 	mbox_alloc_msg_get_hw_cap(mbox);
509 	rc = mbox_process_msg(mbox, (void *)&hw_cap_rsp);
510 	if (rc) {
511 		plt_err("Failed to fetch NPC HW capability");
512 		goto done;
513 	}
514 
515 	*npc_hw_cap = hw_cap_rsp->npc_hash_extract;
516 done:
517 	mbox_put(mbox);
518 	return rc;
519 }
520 
521 int
npc_mcam_fetch_kex_cfg(struct npc * npc)522 npc_mcam_fetch_kex_cfg(struct npc *npc)
523 {
524 	struct npc_get_kex_cfg_rsp *kex_rsp;
525 	struct mbox *mbox = mbox_get(npc->mbox);
526 	int rc = 0;
527 
528 	mbox_alloc_msg_npc_get_kex_cfg(mbox);
529 	rc = mbox_process_msg(mbox, (void *)&kex_rsp);
530 	if (rc) {
531 		plt_err("Failed to fetch NPC KEX config");
532 		goto done;
533 	}
534 
535 	mbox_memcpy((char *)npc->profile_name, kex_rsp->mkex_pfl_name, MKEX_NAME_LEN);
536 
537 	npc->exact_match_ena = (kex_rsp->rx_keyx_cfg >> 40) & 0xF;
538 	npc_mcam_process_mkex_cfg(npc, kex_rsp);
539 
540 done:
541 	mbox_put(mbox);
542 	return rc;
543 }
544 
/* Program the RX channel match (bits [11:0] of KW0) in both the write
 * request and the cached flow key. On cn10k the CPT channel bits are
 * adjusted so a rule matches first-pass packets, second-pass (post-CPT)
 * packets, or both, depending on 'is_second_pass' and the flow action.
 */
static void
npc_mcam_set_channel(struct roc_npc_flow *flow, struct npc_mcam_write_entry_req *req,
		     uint16_t channel, uint16_t chan_mask, bool is_second_pass)
{
	uint16_t chan = 0, mask = 0;

	/* Clear the previous channel match before re-programming it. */
	req->entry_data.kw[0] &= ~(GENMASK(11, 0));
	req->entry_data.kw_mask[0] &= ~(GENMASK(11, 0));
	flow->mcam_data[0] &= ~(GENMASK(11, 0));
	flow->mcam_mask[0] &= ~(GENMASK(11, 0));
	chan = channel;
	mask = chan_mask;

	if (roc_model_runtime_is_cn10k()) {
		if (is_second_pass) {
			/* Match only packets re-injected from CPT. */
			chan = (channel | NIX_CHAN_CPT_CH_START);
			mask = (chan_mask | NIX_CHAN_CPT_CH_START);
		} else {
			if (!(flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC)) {
				/*
				 * Clear bits 10 & 11 corresponding to CPT
				 * channel. By default, rules should match
				 * both first pass packets and second pass
				 * packets from CPT.
				 */
				chan = (channel & NIX_CHAN_CPT_X2P_MASK);
				mask = (chan_mask & NIX_CHAN_CPT_X2P_MASK);
			}
		}
	}

	req->entry_data.kw[0] |= (uint64_t)chan;
	req->entry_data.kw_mask[0] |= (uint64_t)mask;
	flow->mcam_data[0] |= (uint64_t)chan;
	flow->mcam_mask[0] |= (uint64_t)mask;
}
581 
582 static int
npc_mcam_set_pf_func(struct npc * npc,struct roc_npc_flow * flow,uint16_t pf_func)583 npc_mcam_set_pf_func(struct npc *npc, struct roc_npc_flow *flow, uint16_t pf_func)
584 {
585 #define NPC_PF_FUNC_WIDTH    2
586 #define NPC_KEX_PF_FUNC_MASK 0xFFFF
587 	uint16_t nr_bytes, hdr_offset, key_offset, pf_func_offset;
588 	uint8_t *flow_mcam_data, *flow_mcam_mask;
589 	struct npc_lid_lt_xtract_info *xinfo;
590 	bool pffunc_found = false;
591 	uint16_t mask = 0xFFFF;
592 	int i;
593 
594 	flow_mcam_data = (uint8_t *)flow->mcam_data;
595 	flow_mcam_mask = (uint8_t *)flow->mcam_mask;
596 
597 	xinfo = &npc->prx_dxcfg[NIX_INTF_TX][NPC_LID_LA][NPC_LT_LA_IH_NIX_ETHER];
598 
599 	for (i = 0; i < NPC_MAX_LD; i++) {
600 		nr_bytes = xinfo->xtract[i].len;
601 		hdr_offset = xinfo->xtract[i].hdr_off;
602 		key_offset = xinfo->xtract[i].key_off;
603 
604 		if (hdr_offset > 0 || nr_bytes < NPC_PF_FUNC_WIDTH)
605 			continue;
606 		else
607 			pffunc_found = true;
608 
609 		pf_func_offset = key_offset + nr_bytes - NPC_PF_FUNC_WIDTH;
610 		memcpy((void *)&flow_mcam_data[pf_func_offset], (uint8_t *)&pf_func,
611 		       NPC_PF_FUNC_WIDTH);
612 		memcpy((void *)&flow_mcam_mask[pf_func_offset], (uint8_t *)&mask,
613 		       NPC_PF_FUNC_WIDTH);
614 	}
615 	if (!pffunc_found)
616 		return -EINVAL;
617 
618 	return 0;
619 }
620 
/* Reserve an MCAM entry for 'flow', build the write request (action, vtag,
 * key data/mask, channel and layer-type fix-ups) and commit it over the
 * mailbox. Handles inline-device pre-allocated IPsec entries, TX PF_FUNC
 * patching, sampling (multicast) rules and cn10k second-pass rules.
 *
 * NOTE(review): on the early error returns (set_pf_func / mcast setup
 * failures) a counter allocated at the top is apparently not freed —
 * looks like a leak; confirm against upstream before changing.
 */
int
npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow, struct npc_parse_state *pst)
{
	struct npc_mcam_write_entry_req *req;
	struct nix_inl_dev *inl_dev = NULL;
	struct mbox *mbox = npc->mbox;
	struct mbox_msghdr *rsp;
	struct idev_cfg *idev;
	uint16_t pf_func = 0;
	uint16_t ctr = ~(0);
	uint32_t la_offset;
	uint64_t mask;
	int rc, idx;
	int entry;

	PLT_SET_USED(pst);

	/* Inline-device flows are programmed through its own mailbox. */
	idev = idev_get_cfg();
	if (idev)
		inl_dev = idev->nix_inl_dev;

	if (inl_dev && inl_dev->ipsec_index) {
		if (flow->is_inline_dev)
			mbox = inl_dev->dev.mbox;
	}

	/* Allocate and zero a flow counter if requested. */
	if (flow->use_ctr) {
		rc = npc_mcam_alloc_counter(mbox, &ctr);
		if (rc)
			return rc;

		flow->ctr_id = ctr;
		rc = npc_mcam_clear_counter(mbox, flow->ctr_id);
		if (rc)
			return rc;
	}

	/* Pick the MCAM slot: either a pre-allocated inline-IPsec entry or
	 * a free entry from the priority allocator.
	 */
	if (flow->nix_intf == NIX_INTF_RX && flow->is_inline_dev && inl_dev &&
	    inl_dev->ipsec_index && inl_dev->is_multi_channel) {
		if (inl_dev->curr_ipsec_idx >= inl_dev->alloc_ipsec_rules)
			return NPC_ERR_MCAM_ALLOC;
		entry = inl_dev->ipsec_index[inl_dev->curr_ipsec_idx];
		inl_dev->curr_ipsec_idx++;
		flow->use_pre_alloc = 1;
	} else {
		entry = npc_get_free_mcam_entry(mbox, flow, npc);
		if (entry < 0) {
			if (flow->use_ctr)
				npc_mcam_free_counter(mbox, ctr);
			return NPC_ERR_MCAM_ALLOC;
		}
	}

	/* TX rules match on the sender's PF_FUNC (big-endian in the key).
	 * Note: this inner pf_func intentionally shadows the outer one.
	 */
	if (flow->nix_intf == NIX_INTF_TX) {
		uint16_t pf_func = flow->tx_pf_func;

		if (flow->has_rep)
			pf_func = flow->rep_pf_func;

		pf_func = plt_cpu_to_be_16(pf_func);

		rc = npc_mcam_set_pf_func(npc, flow, pf_func);
		if (rc)
			return rc;
	}

	if (flow->is_sampling_rule) {
		/* Save and restore any mark value set */
		uint16_t mark = (flow->npc_action >> 40) & 0xffff;
		uint16_t mce_index = 0;
		uint32_t rqs[2] = {};

		/* Sampling is implemented as a 2-member multicast list. */
		rqs[1] = flow->recv_queue;
		rc = roc_nix_mcast_list_setup(npc->mbox, flow->nix_intf, 2, flow->mcast_pf_funcs,
					      flow->mcast_channels, rqs, &flow->mcast_grp_index,
					      &flow->mce_start_index);
		if (rc)
			return rc;

		flow->npc_action = NIX_RX_ACTIONOP_MCAST;
		mce_index = flow->mce_start_index;
		/* MCE index / mark live at different bit offsets on TX vs RX. */
		if (flow->nix_intf == NIX_INTF_TX) {
			flow->npc_action |= (uint64_t)mce_index << 12;
			flow->npc_action |= (uint64_t)mark << 32;
		} else {
			flow->npc_action |= (uint64_t)mce_index << 20;
			flow->npc_action |= (uint64_t)mark << 40;
		}
	}

	req = mbox_alloc_msg_npc_mcam_write_entry(mbox_get(mbox));
	if (req == NULL) {
		rc = -ENOSPC;
		goto exit;
	}
	req->set_cntr = flow->use_ctr;
	req->cntr = flow->ctr_id;
	req->entry = entry;

	req->intf = (flow->nix_intf == NIX_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
	req->enable_entry = 1;
	if (flow->nix_intf == NIX_INTF_RX)
		flow->npc_action |= (uint64_t)flow->recv_queue << 20;
	req->entry_data.action = flow->npc_action;

	/*
	 * Driver sets vtag action on per interface basis, not
	 * per flow basis. It is a matter of how we decide to support
	 * this pmd specific behavior. There are two ways:
	 *	1. Inherit the vtag action from the one configured
	 *	   for this interface. This can be read from the
	 *	   vtag_action configured for default mcam entry of
	 *	   this pf_func.
	 *	2. Do not support vtag action with npc_flow.
	 *
	 * Second approach is used now.
	 */
	req->entry_data.vtag_action = flow->vtag_action;

	for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
		req->entry_data.kw[idx] = flow->mcam_data[idx];
		req->entry_data.kw_mask[idx] = flow->mcam_mask[idx];
	}

	if (flow->nix_intf == NIX_INTF_RX) {
		/* Redirect the action PF_FUNC (bits [19:4]) for inline-IPsec
		 * and representor flows, then program the channel match.
		 */
		if (inl_dev && inl_dev->is_multi_channel &&
		    (flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC)) {
			pf_func = nix_inl_dev_pffunc_get();
			req->entry_data.action &= ~(GENMASK(19, 4));
			req->entry_data.action |= (uint64_t)pf_func << 4;
			flow->npc_action &= ~(GENMASK(19, 4));
			flow->npc_action |= (uint64_t)pf_func << 4;

			npc_mcam_set_channel(flow, req, inl_dev->channel, inl_dev->chan_mask,
					     false);
		} else if (flow->has_rep) {
			pf_func = (flow->rep_act_pf_func == 0) ? flow->rep_pf_func :
								 flow->rep_act_pf_func;
			req->entry_data.action &= ~(GENMASK(19, 4));
			req->entry_data.action |= (uint64_t)pf_func << 4;
			flow->npc_action &= ~(GENMASK(19, 4));
			flow->npc_action |= (uint64_t)pf_func << 4;
			npc_mcam_set_channel(flow, req, flow->rep_channel, (BIT_ULL(12) - 1),
					     false);
		} else if (npc->is_sdp_link) {
			npc_mcam_set_channel(flow, req, npc->sdp_channel, npc->sdp_channel_mask,
					     pst->is_second_pass_rule);
		} else {
			npc_mcam_set_channel(flow, req, npc->channel, (BIT_ULL(12) - 1),
					     pst->is_second_pass_rule);
		}
		/*
		 * For second pass rule, set LA LTYPE to CPT_HDR.
		 * For all other rules, set LA LTYPE to match both 1st pass and 2nd pass ltypes.
		 */
		if (pst->is_second_pass_rule || (!pst->is_second_pass_rule && pst->has_eth_type)) {
			/* Nibble position of the LA layer type within KW0. */
			la_offset = plt_popcount32(npc->keyx_supp_nmask[flow->nix_intf] &
						   ((1ULL << 9 /* LA offset */) - 1));
			la_offset *= 4;

			mask = ~((0xfULL << la_offset));
			req->entry_data.kw[0] &= mask;
			req->entry_data.kw_mask[0] &= mask;
			flow->mcam_data[0] &= mask;
			flow->mcam_mask[0] &= mask;
			if (pst->is_second_pass_rule) {
				req->entry_data.kw[0] |= ((uint64_t)NPC_LT_LA_CPT_HDR) << la_offset;
				req->entry_data.kw_mask[0] |= (0xFULL << la_offset);
				flow->mcam_data[0] |= ((uint64_t)NPC_LT_LA_CPT_HDR) << la_offset;
				flow->mcam_mask[0] |= (0xFULL << la_offset);
			} else {
				/* Mask ltype ETHER (0x2) and CPT_HDR (0xa)  */
				req->entry_data.kw[0] |= (0x2ULL << la_offset);
				req->entry_data.kw_mask[0] |= (0x7ULL << la_offset);
				flow->mcam_data[0] |= (0x2ULL << la_offset);
				flow->mcam_mask[0] |= (0x7ULL << la_offset);
			}
		}
	}

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc != 0)
		goto exit;

	flow->mcam_id = entry;

	if (flow->use_ctr)
		flow->ctr_id = ctr;
	rc = 0;

exit:
	mbox_put(mbox);
	/* On failure, tear down the multicast list created for sampling. */
	if (rc)
		roc_nix_mcast_list_free(npc->mbox, flow->mcast_grp_index);
	return rc;
}
817 
818 static void
npc_set_vlan_ltype(struct npc_parse_state * pst)819 npc_set_vlan_ltype(struct npc_parse_state *pst)
820 {
821 	uint64_t val, mask;
822 	uint8_t lb_offset;
823 
824 	lb_offset =
825 		plt_popcount32(pst->npc->keyx_supp_nmask[pst->nix_intf] &
826 				   ((1ULL << NPC_LTYPE_LB_OFFSET) - 1));
827 	lb_offset *= 4;
828 
829 	mask = ~((0xfULL << lb_offset));
830 	pst->flow->mcam_data[0] &= mask;
831 	pst->flow->mcam_mask[0] &= mask;
832 	/* NPC_LT_LB_CTAG: 0b0010, NPC_LT_LB_STAG_QINQ: 0b0011
833 	 * Set LB layertype/mask as 0b0010/0b1110 to match both.
834 	 */
835 	val = ((uint64_t)(NPC_LT_LB_CTAG & NPC_LT_LB_STAG_QINQ)) << lb_offset;
836 	pst->flow->mcam_data[0] |= val;
837 	pst->flow->mcam_mask[0] |= (0xeULL << lb_offset);
838 }
839 
/* Rewrite the LC layer-type nibble in KW0 so the rule matches both plain
 * IPv6 and IPv6-with-extension-headers, and tighten the LC LFLAG mask for
 * exact extension-header matching when a flag value is present.
 */
static void
npc_set_ipv6ext_ltype_mask(struct npc_parse_state *pst)
{
	uint8_t lc_offset, lcflag_offset;
	uint64_t val, mask;

	/* Nibble position of the LC layer type within KW0. */
	lc_offset =
		plt_popcount32(pst->npc->keyx_supp_nmask[pst->nix_intf] &
				   ((1ULL << NPC_LTYPE_LC_OFFSET) - 1));
	lc_offset *= 4;

	mask = ~((0xfULL << lc_offset));
	pst->flow->mcam_data[0] &= mask;
	pst->flow->mcam_mask[0] &= mask;
	/* NPC_LT_LC_IP6: 0b0100, NPC_LT_LC_IP6_EXT: 0b0101
	 * Set LC layertype/mask as 0b0100/0b1110 to match both.
	 */
	val = ((uint64_t)(NPC_LT_LC_IP6 & NPC_LT_LC_IP6_EXT)) << lc_offset;
	pst->flow->mcam_data[0] |= val;
	pst->flow->mcam_mask[0] |= (0xeULL << lc_offset);

	/* If LC LFLAG is non-zero, set the LC LFLAG mask to 0xF. In general
	 * case flag mask is set same as the value in data. For example, to
	 * match 3 VLANs, flags have to match a range of values. But, for IPv6
	 * extended attributes matching, we need an exact match. Hence, set the
	 * mask as 0xF. This is done only if LC LFLAG value is non-zero,
	 * because for AH and ESP, LC LFLAG is zero and we don't want to match
	 * zero in LFLAG.
	 */
	if (pst->npc->keyx_supp_nmask[pst->nix_intf] & (1ULL << NPC_LFLAG_LC_OFFSET)) {
		lcflag_offset = plt_popcount32(pst->npc->keyx_supp_nmask[pst->nix_intf] &
					       ((1ULL << NPC_LFLAG_LC_OFFSET) - 1));
		lcflag_offset *= 4;

		mask = (0xfULL << lcflag_offset);
		val = pst->flow->mcam_data[0] & mask;
		if (val)
			pst->flow->mcam_mask[0] |= mask;
	}
}
880 
/* Pack the parsed layer types and flags into the flow's MCAM key (the
 * non-LDATA nibbles), apply VLAN/IPv6-ext layer-type fix-ups, merge the
 * VF's base steering rule when applicable, and optionally allocate and
 * write the MCAM entry.
 */
int
npc_program_mcam(struct npc *npc, struct npc_parse_state *pst, bool mcam_alloc)
{
	struct npc_mcam_read_base_rule_rsp *base_rule_rsp;
	/* This is non-LDATA part in search key */
	uint64_t key_data[2] = {0ULL, 0ULL};
	uint64_t key_mask[2] = {0ULL, 0ULL};
	int key_len, bit = 0, index, rc = 0;
	struct nix_inl_dev *inl_dev = NULL;
	int intf = pst->flow->nix_intf;
	struct mcam_entry *base_entry;
	bool skip_base_rule = false;
	int off, idx, data_off = 0;
	uint8_t lid, mask, data;
	struct idev_cfg *idev;
	uint16_t layer_info;
	uint64_t lt, flags;
	struct mbox *mbox;

	/* Skip till Layer A data start */
	while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
		if (npc->keyx_supp_nmask[intf] & (1 << bit))
			data_off++;
		bit++;
	}

	/* Each bit represents 1 nibble */
	data_off *= 4;

	index = 0;
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		/* Offset in key */
		off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
		lt = pst->lt[lid] & 0xf;
		flags = pst->flags[lid] & 0xff;

		/* NPC_LAYER_KEX_S: 3 bits = {flags-lo, flags-hi, ltype}. */
		layer_info = ((npc->keyx_supp_nmask[intf] >> off) & 0x7);

		if (layer_info) {
			for (idx = 0; idx <= 2; idx++) {
				if (layer_info & (1 << idx)) {
					/* idx 2 = layer type nibble,
					 * idx 1 = flags high nibble,
					 * idx 0 = flags low nibble.
					 */
					if (idx == 2) {
						data = lt;
						mask = 0xf;
					} else if (idx == 1) {
						data = ((flags >> 4) & 0xf);
						mask = ((flags >> 4) & 0xf);
					} else {
						data = (flags & 0xf);
						mask = (flags & 0xf);
					}

					/* Roll over into the next 64-bit word. */
					if (data_off >= 64) {
						data_off = 0;
						index++;
					}
					key_data[index] |= ((uint64_t)data << data_off);

					/* Unknown layer type: match any. */
					if (lt == 0)
						mask = 0;
					key_mask[index] |= ((uint64_t)mask << data_off);
					data_off += 4;
				}
			}
		}
	}

	/* Copy this into mcam string */
	key_len = (pst->npc->keyx_len[intf] + 7) / 8;
	memcpy(pst->flow->mcam_data, key_data, key_len);
	memcpy(pst->flow->mcam_mask, key_mask, key_len);

	if (pst->set_vlan_ltype_mask)
		npc_set_vlan_ltype(pst);

	if (pst->set_ipv6ext_ltype_mask)
		npc_set_ipv6ext_ltype_mask(pst);

	/* Inline-IPsec multi-channel flows do not inherit the base rule. */
	idev = idev_get_cfg();
	if (idev)
		inl_dev = idev->nix_inl_dev;
	if (inl_dev && inl_dev->is_multi_channel &&
	    (pst->flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC))
		skip_base_rule = true;

	/* VF RX rules must also satisfy the PF's base steering rule, so OR
	 * its key data/mask into ours.
	 */
	if ((pst->is_vf || pst->flow->is_rep_vf) && pst->flow->nix_intf == NIX_INTF_RX &&
	    !skip_base_rule) {
		if (pst->flow->has_rep)
			mbox = mbox_get(pst->flow->rep_mbox);
		else
			mbox = mbox_get(npc->mbox);
		(void)mbox_alloc_msg_npc_read_base_steer_rule(mbox);
		rc = mbox_process_msg(mbox, (void *)&base_rule_rsp);
		if (rc) {
			mbox_put(mbox);
			plt_err("Failed to fetch VF's base MCAM entry");
			return rc;
		}
		mbox_put(mbox);
		base_entry = &base_rule_rsp->entry_data;
		for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
			pst->flow->mcam_data[idx] |= base_entry->kw[idx];
			pst->flow->mcam_mask[idx] |= base_entry->kw_mask[idx];
		}
	}

	/*
	 * Now we have mcam data and mask formatted as
	 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
	 * hole is present if key_len is odd number of nibbles.
	 * mcam data must be split into 64 bits + 48 bits segments
	 * for each back W0, W1.
	 */

	if (mcam_alloc)
		return npc_mcam_alloc_and_write(npc, pst->flow, pst);
	else
		return 0;
}
1001 
1002 int
npc_flow_enable_all_entries(struct npc * npc,bool enable)1003 npc_flow_enable_all_entries(struct npc *npc, bool enable)
1004 {
1005 	struct nix_inl_dev *inl_dev;
1006 	struct npc_flow_list *list;
1007 	struct roc_npc_flow *flow;
1008 	struct idev_cfg *idev;
1009 	int rc = 0, idx;
1010 
1011 	/* Free any MCAM counters and delete flow list */
1012 	for (idx = 0; idx < npc->flow_max_priority; idx++) {
1013 		list = &npc->flow_list[idx];
1014 		TAILQ_FOREACH(flow, list, next) {
1015 			flow->enable = enable;
1016 			rc = npc_mcam_write_entry(npc->mbox, flow);
1017 			if (rc)
1018 				return rc;
1019 		}
1020 	}
1021 
1022 	list = &npc->ipsec_list;
1023 	idev = idev_get_cfg();
1024 	if (!idev)
1025 		return 0;
1026 	inl_dev = idev->nix_inl_dev;
1027 
1028 	if (inl_dev) {
1029 		TAILQ_FOREACH(flow, list, next) {
1030 			flow->enable = enable;
1031 			rc = npc_mcam_write_entry(inl_dev->dev.mbox, flow);
1032 			if (rc)
1033 				return rc;
1034 		}
1035 	}
1036 	return rc;
1037 }
1038 
1039 int
npc_flow_free_all_resources(struct npc * npc)1040 npc_flow_free_all_resources(struct npc *npc)
1041 {
1042 	struct roc_npc_flow *flow;
1043 	int rc, idx;
1044 
1045 	/* Free all MCAM entries allocated */
1046 	rc = npc_mcam_free_all_entries(npc);
1047 
1048 	/* Free any MCAM counters and delete flow list */
1049 	for (idx = 0; idx < npc->flow_max_priority; idx++) {
1050 		while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
1051 			npc_rss_group_free(npc, flow);
1052 			if (flow->ctr_id != NPC_COUNTER_NONE) {
1053 				rc |= npc_mcam_clear_counter(npc->mbox, flow->ctr_id);
1054 				rc |= npc_mcam_free_counter(npc->mbox, flow->ctr_id);
1055 			}
1056 
1057 			if (flow->is_sampling_rule)
1058 				roc_nix_mcast_list_free(npc->mbox, flow->mcast_grp_index);
1059 
1060 			npc_delete_prio_list_entry(npc, flow);
1061 
1062 			TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
1063 			plt_free(flow);
1064 		}
1065 	}
1066 	return rc;
1067 }
1068