1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
3 */
4 #include "roc_api.h"
5 #include "roc_priv.h"
6
static void
npc_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len)
{
        const uint8_t *src = data + len - 1;
        int i;

        /* Copy 'data' into 'ptr' with the byte order reversed. */
        for (i = 0; i < len; i++)
                ptr[i] = *src--;
}
15
16 static int
npc_check_copysz(size_t size,size_t len)17 npc_check_copysz(size_t size, size_t len)
18 {
19 if (len <= size)
20 return len;
21 return NPC_ERR_PARAM;
22 }
23
/* Return 1 when every byte in [mem, mem + len) is zero, 0 otherwise. */
static inline int
npc_mem_is_zero(const void *mem, int len)
{
        const char *bytes = mem;
        const char *end = bytes + len;

        while (bytes < end) {
                if (*bytes++)
                        return 0;
        }
        return 1;
}
36
37 static void
npc_set_hw_mask(struct npc_parse_item_info * info,struct npc_xtract_info * xinfo,char * hw_mask)38 npc_set_hw_mask(struct npc_parse_item_info *info, struct npc_xtract_info *xinfo,
39 char *hw_mask)
40 {
41 int max_off, offset;
42 int j;
43
44 if (xinfo->enable == 0)
45 return;
46
47 if (xinfo->hdr_off < info->hw_hdr_len)
48 return;
49
50 max_off = xinfo->hdr_off + xinfo->len - info->hw_hdr_len;
51
52 if (max_off > info->len)
53 max_off = info->len;
54
55 offset = xinfo->hdr_off - info->hw_hdr_len;
56 for (j = offset; j < max_off; j++)
57 hw_mask[j] = 0xff;
58 }
59
60 static void
npc_ipv6_hash_mask_get(struct npc_xtract_info * xinfo,struct npc_parse_item_info * info)61 npc_ipv6_hash_mask_get(struct npc_xtract_info *xinfo, struct npc_parse_item_info *info)
62 {
63 int offset = 0;
64 uint8_t *hw_mask = info->hw_mask;
65
66 offset = xinfo->hdr_off - info->hw_hdr_len;
67 memset(&hw_mask[offset], 0xFF, NPC_HASH_FIELD_LEN);
68 }
69
70 void
npc_get_hw_supp_mask(struct npc_parse_state * pst,struct npc_parse_item_info * info,int lid,int lt)71 npc_get_hw_supp_mask(struct npc_parse_state *pst, struct npc_parse_item_info *info, int lid, int lt)
72 {
73 struct npc_xtract_info *xinfo, *lfinfo;
74 char *hw_mask = info->hw_mask;
75 int lf_cfg = 0;
76 int i, j;
77 int intf;
78
79 intf = pst->nix_intf;
80 xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract;
81 memset(hw_mask, 0, info->len);
82
83 for (i = 0; i < NPC_MAX_LD; i++) {
84 if (pst->npc->hash_extract_cap && xinfo[i].use_hash)
85 npc_ipv6_hash_mask_get(&xinfo[i], info);
86 else
87 npc_set_hw_mask(info, &xinfo[i], hw_mask);
88 }
89
90 for (i = 0; i < NPC_MAX_LD; i++) {
91 if (xinfo[i].flags_enable == 0)
92 continue;
93
94 lf_cfg = pst->npc->prx_lfcfg[i].i;
95 if (lf_cfg == lid) {
96 for (j = 0; j < NPC_MAX_LFL; j++) {
97 lfinfo = pst->npc->prx_fxcfg[intf][i][j].xtract;
98 npc_set_hw_mask(info, &lfinfo[0], hw_mask);
99 }
100 }
101 }
102 }
103
104 inline int
npc_mask_is_supported(const char * mask,const char * hw_mask,int len)105 npc_mask_is_supported(const char *mask, const char *hw_mask, int len)
106 {
107 /*
108 * If no hw_mask, assume nothing is supported.
109 * mask is never NULL
110 */
111 if (hw_mask == NULL)
112 return npc_mem_is_zero(mask, len);
113
114 while (len--) {
115 if ((mask[len] | hw_mask[len]) != hw_mask[len])
116 return 0; /* False */
117 }
118 return 1;
119 }
120
121 int
npc_parse_item_basic(const struct roc_npc_item_info * item,struct npc_parse_item_info * info)122 npc_parse_item_basic(const struct roc_npc_item_info *item,
123 struct npc_parse_item_info *info)
124 {
125 /* Item must not be NULL */
126 if (item == NULL)
127 return NPC_ERR_PARAM;
128
129 /* Don't support ranges */
130 if (item->last != NULL)
131 return NPC_ERR_INVALID_RANGE;
132
133 /* If spec is NULL, both mask and last must be NULL, this
134 * makes it to match ANY value (eq to mask = 0).
135 * Setting either mask or last without spec is an error
136 */
137 if (item->spec == NULL) {
138 if (item->last == NULL && item->mask == NULL) {
139 info->spec = NULL;
140 return 0;
141 }
142 return NPC_ERR_INVALID_SPEC;
143 }
144
145 /* We have valid spec */
146 if (item->type != ROC_NPC_ITEM_TYPE_RAW)
147 info->spec = item->spec;
148
149 /* If mask is not set, use default mask, err if default mask is
150 * also NULL.
151 */
152 if (item->mask == NULL) {
153 if (info->def_mask == NULL)
154 return NPC_ERR_PARAM;
155 info->mask = info->def_mask;
156 } else {
157 if (item->type != ROC_NPC_ITEM_TYPE_RAW)
158 info->mask = item->mask;
159 }
160
161 if (info->mask == NULL)
162 return NPC_ERR_INVALID_MASK;
163
164 /* mask specified must be subset of hw supported mask
165 * mask | hw_mask == hw_mask
166 */
167 if (!npc_mask_is_supported(info->mask, info->hw_mask, info->len))
168 return NPC_ERR_INVALID_MASK;
169
170 return 0;
171 }
172
/* Copy the spec/mask bytes covered by one hardware extractor into the
 * parse state's MCAM data/mask arrays, in the byte-reversed layout the
 * MCAM expects.
 *
 * Returns 0 on success (including "extractor contributes nothing") or
 * NPC_ERR_INVALID_SIZE when the extractor or the remaining key window
 * is out of bounds.
 */
static int
npc_update_extraction_data(struct npc_parse_state *pst,
                           struct npc_parse_item_info *info,
                           struct npc_xtract_info *xinfo)
{
        uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN];
        uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN];
        struct npc_xtract_info *x;
        int hdr_off;
        int len = 0;

        x = xinfo;
        if (x->len > NPC_MAX_EXTRACT_DATA_LEN)
                return NPC_ERR_INVALID_SIZE;

        len = x->len;
        hdr_off = x->hdr_off;

        /* Extraction window starting before the item data, or a
         * disabled extractor, contributes nothing.
         */
        if (hdr_off < info->hw_hdr_len)
                return 0;

        if (x->enable == 0)
                return 0;

        /* Rebase the offset to the start of the item data. */
        hdr_off -= info->hw_hdr_len;

        if (hdr_off >= info->len)
                return 0;

        /* Clamp the copy length to the item length... */
        if (hdr_off + len > info->len)
                len = info->len - hdr_off;

        /* ...and to the space left in the MCAM key after key_off. */
        len = npc_check_copysz((ROC_NPC_MAX_MCAM_WIDTH_DWORDS * 8) - x->key_off,
                               len);
        if (len < 0)
                return NPC_ERR_INVALID_SIZE;

        /* Need to reverse complete structure so that dest addr is at
         * MSB so as to program the MCAM using mcam_data & mcam_mask
         * arrays
         */
        /* NOTE(review): the reversal below uses x->len while only 'len'
         * bytes are copied out; when 'len' was clamped above this reads
         * spec/mask bytes past info->len — confirm against upstream
         * intent.
         */
        npc_prep_mcam_ldata(int_info, (const uint8_t *)info->spec + hdr_off,
                            x->len);
        npc_prep_mcam_ldata(int_info_mask,
                            (const uint8_t *)info->mask + hdr_off, x->len);

        memcpy(pst->mcam_mask + x->key_off, int_info_mask, len);
        memcpy(pst->mcam_data + x->key_off, int_info, len);
        return 0;
}
223
224 static int
npc_field_hash_secret_get(struct npc * npc,struct npc_hash_cfg * hash_cfg)225 npc_field_hash_secret_get(struct npc *npc, struct npc_hash_cfg *hash_cfg)
226 {
227 struct npc_get_field_hash_info_req *req;
228 struct npc_get_field_hash_info_rsp *rsp;
229 struct mbox *mbox = mbox_get(npc->mbox);
230 int rc = 0;
231
232 req = mbox_alloc_msg_npc_get_field_hash_info(mbox);
233 if (req == NULL)
234 return -ENOSPC;
235 rc = mbox_process_msg(mbox, (void *)&rsp);
236 if (rc) {
237 plt_err("Failed to fetch field hash secret key");
238 goto done;
239 }
240
241 mbox_memcpy(hash_cfg->secret_key, rsp->secret_key, sizeof(rsp->secret_key));
242 mbox_memcpy(hash_cfg->hash_mask, rsp->hash_mask, sizeof(rsp->hash_mask));
243 mbox_memcpy(hash_cfg->hash_ctrl, rsp->hash_ctrl, sizeof(rsp->hash_ctrl));
244
245 done:
246 mbox_put(mbox);
247 return rc;
248 }
249
/* Convert 'len' 32-bit big-endian words from 'src' into host byte
 * order in 'dst'.
 */
static inline void
be32_to_cpu_array(uint32_t *dst, const uint32_t *src, size_t len)
{
        size_t idx;

        for (idx = 0; idx < len; idx++)
                dst[idx] = plt_be_to_cpu_32(src[idx]);
}
258
/* Extract 'width_bits' (1..64) starting at 'start_bit' from the
 * little-endian multi-word bit string 'input'.
 */
static uint64_t
npc_wide_extract(const uint64_t input[], size_t start_bit, size_t width_bits)
{
        /* 128-bit shift keeps width_bits == 64 well-defined. */
        const uint64_t mask = ~(uint64_t)((~(__uint128_t)0) << width_bits);
        const size_t msb = start_bit + width_bits - 1;
        const size_t low_word = start_bit / 64;
        const size_t high_word = msb / 64;
        uint64_t value;

        /* Fast path: the field lies within a single 64-bit word. */
        if (low_word == high_word)
                return (input[low_word] >> (start_bit % 64)) & mask;

        /* Field straddles two words: stitch the halves together. */
        value = input[low_word] >> (start_bit % 64);
        value |= input[high_word] << (64 - (start_bit % 64));
        return value & mask;
}
277
/* Shift a multi-word key of 'key_bit_len' bits left by one bit,
 * propagating the MSB of each 64-bit word into the next as a carry.
 */
static void
npc_lshift_key(uint64_t *key, size_t key_bit_len)
{
        /* Number of 64-bit words covering key_bit_len bits. */
        size_t nwords = (key_bit_len + 63) / 64;
        uint64_t carry = 0;
        size_t w;

        for (w = 0; w < nwords; w++) {
                uint64_t next_carry = key[w] >> 63;

                key[w] = (key[w] << 1) | carry;
                carry = next_carry;
        }
}
297
/* Toeplitz hash over 'data_bit_len' bits of 'data' with a
 * 'key_bit_len'-bit key: for every set data bit (MSB first) XOR in the
 * top 32 bits of the key, then shift the key left by one. Note 'key'
 * is modified in place by the shifting.
 */
static uint32_t
npc_toeplitz_hash(const uint64_t *data, uint64_t *key, size_t data_bit_len, size_t key_bit_len)
{
        uint32_t hash_out = 0;
        int bit;

        for (bit = data_bit_len - 1; bit >= 0; bit--) {
                if ((data[bit / 64] >> (bit % 64)) & 0x1)
                        hash_out ^= (uint32_t)npc_wide_extract(key, key_bit_len - 32, 32);

                npc_lshift_key(key, key_bit_len);
        }

        return hash_out;
}
317
/* Compute the 32-bit field hash for the two 64-bit data lanes in
 * 'ldata' using the secret key and per-interface masks in 'hash_cfg'.
 */
static uint32_t
npc_field_hash_calc(uint64_t *ldata, struct npc_hash_cfg *hash_cfg, uint8_t intf, uint8_t hash_idx)
{
        uint64_t hash_key[3];
        uint64_t data_padded[2];
        uint32_t field_hash;

        /* Stitch the secret key words into a 159-bit key across three
         * 64-bit words (hence the 31/33-bit shift pairs). Presumably
         * this mirrors the hardware key layout — confirm against the
         * NPC field-hash documentation.
         */
        hash_key[0] = hash_cfg->secret_key[1] << 31;
        hash_key[0] |= hash_cfg->secret_key[2];
        hash_key[1] = hash_cfg->secret_key[1] >> 33;
        hash_key[1] |= hash_cfg->secret_key[0] << 31;
        hash_key[2] = hash_cfg->secret_key[0] >> 33;

        /* Mask the input lanes before hashing 128 bits of data. */
        data_padded[0] = hash_cfg->hash_mask[intf][hash_idx][0] & ldata[0];
        data_padded[1] = hash_cfg->hash_mask[intf][hash_idx][1] & ldata[1];
        field_hash = npc_toeplitz_hash(data_padded, hash_key, 128, 159);

        /* Apply the hash control word: high 32 bits select which hash
         * bits are kept, low bits are OR-ed into the result.
         */
        field_hash &= hash_cfg->hash_ctrl[intf][hash_idx] >> 32;
        field_hash |= hash_cfg->hash_ctrl[intf][hash_idx];
        return field_hash;
}
339
340 static int
npc_ipv6_field_hash_get(struct npc * npc,const uint32_t * ip6addr,uint8_t intf,int hash_idx,uint32_t * hash)341 npc_ipv6_field_hash_get(struct npc *npc, const uint32_t *ip6addr, uint8_t intf, int hash_idx,
342 uint32_t *hash)
343 {
344 #define IPV6_WORDS 4
345 uint32_t ipv6_addr[IPV6_WORDS];
346 struct npc_hash_cfg hash_cfg;
347 uint64_t ldata[2];
348 int rc = 0;
349
350 rc = npc_field_hash_secret_get(npc, &hash_cfg);
351 if (rc)
352 return -1;
353
354 be32_to_cpu_array(ipv6_addr, ip6addr, IPV6_WORDS);
355 ldata[0] = (uint64_t)ipv6_addr[2] << 32 | ipv6_addr[3];
356 ldata[1] = (uint64_t)ipv6_addr[0] << 32 | ipv6_addr[1];
357 *hash = npc_field_hash_calc(ldata, &hash_cfg, intf, hash_idx);
358
359 return 0;
360 }
361
362 static int
npc_hash_field_get(struct npc_xtract_info * xinfo,const struct roc_npc_flow_item_ipv6 * ipv6_spec,const struct roc_npc_flow_item_ipv6 * ipv6_mask,uint8_t * hash_field)363 npc_hash_field_get(struct npc_xtract_info *xinfo, const struct roc_npc_flow_item_ipv6 *ipv6_spec,
364 const struct roc_npc_flow_item_ipv6 *ipv6_mask, uint8_t *hash_field)
365 {
366 const uint8_t *ipv6_hdr_spec, *ipv6_hdr_mask;
367 struct roc_ipv6_hdr ipv6_buf;
368 int offset = xinfo->hdr_off;
369
370 memset(&ipv6_buf, 0, sizeof(ipv6_buf));
371
372 ipv6_hdr_spec = (const uint8_t *)&ipv6_spec->hdr;
373 ipv6_hdr_mask = (const uint8_t *)&ipv6_mask->hdr;
374
375 /* Check if mask is set for the field to be hashed */
376 if (memcmp(ipv6_hdr_mask + offset, &ipv6_buf, ROC_IPV6_ADDR_LEN) == 0)
377 return 0;
378
379 /* Extract the field to be hashed from item spec */
380 memcpy(hash_field, ipv6_hdr_spec + offset, ROC_IPV6_ADDR_LEN);
381 return 1;
382 }
383
384 int
npc_process_ipv6_field_hash(const struct roc_npc_flow_item_ipv6 * ipv6_spec,const struct roc_npc_flow_item_ipv6 * ipv6_mask,struct npc_parse_state * pst,uint8_t ltype)385 npc_process_ipv6_field_hash(const struct roc_npc_flow_item_ipv6 *ipv6_spec,
386 const struct roc_npc_flow_item_ipv6 *ipv6_mask,
387 struct npc_parse_state *pst, uint8_t ltype)
388 {
389 struct npc_lid_lt_xtract_info *lid_lt_xinfo;
390 uint8_t hash_field[ROC_IPV6_ADDR_LEN];
391 struct npc_xtract_info *xinfo;
392 struct roc_ipv6_hdr ipv6_buf;
393 uint32_t hash = 0, mask;
394 int intf, i, rc = 0;
395
396 memset(&ipv6_buf, 0, sizeof(ipv6_buf));
397 memset(hash_field, 0, sizeof(hash_field));
398
399 intf = pst->nix_intf;
400 lid_lt_xinfo = &pst->npc->prx_dxcfg[intf][NPC_LID_LC][ltype];
401
402 for (i = 0; i < NPC_MAX_LD; i++) {
403 xinfo = &lid_lt_xinfo->xtract[i];
404 if (!xinfo->use_hash)
405 continue;
406
407 rc = npc_hash_field_get(xinfo, ipv6_spec, ipv6_mask, hash_field);
408 if (rc == 0)
409 continue;
410
411 rc = npc_ipv6_field_hash_get(pst->npc, (const uint32_t *)hash_field, intf, i,
412 &hash);
413 if (rc)
414 return rc;
415
416 mask = GENMASK(31, 0);
417 memcpy(pst->mcam_mask + xinfo->key_off, (uint8_t *)&mask, 4);
418 memcpy(pst->mcam_data + xinfo->key_off, (uint8_t *)&hash, 4);
419 }
420
421 return 0;
422 }
423
/* Record layer <lid>/<lt> in the parse state and program the MCAM
 * data/mask from info->spec/mask for every extractor of that layer.
 *
 * Returns 0 on success or the NPC_ERR_* from the extraction step.
 */
int
npc_update_parse_state(struct npc_parse_state *pst, struct npc_parse_item_info *info, int lid,
                       int lt, uint8_t flags)
{
        struct npc_lid_lt_xtract_info *xinfo;
        struct roc_npc_flow_dump_data *dump;
        struct npc_xtract_info *lfinfo;
        int intf, lf_cfg;
        int i, j, rc = 0;

        /* Mark this layer as seen; remember its layer type and flags. */
        pst->layer_mask |= lid;
        pst->lt[lid] = lt;
        pst->flags[lid] = flags;

        intf = pst->nix_intf;
        xinfo = &pst->npc->prx_dxcfg[intf][lid][lt];
        if (xinfo->is_terminating)
                pst->terminate = 1;

        /* A NULL spec matches anything: nothing to program in the MCAM. */
        if (info->spec == NULL)
                goto done;

        /* LD extractors; hash-based ones are programmed elsewhere. */
        for (i = 0; i < NPC_MAX_LD; i++) {
                if (xinfo->xtract[i].use_hash)
                        continue;
                rc = npc_update_extraction_data(pst, info, &xinfo->xtract[i]);
                if (rc != 0)
                        return rc;
        }

        /* Flag-based (LF) extractors whose source layer is this lid. */
        for (i = 0; i < NPC_MAX_LD; i++) {
                if (xinfo->xtract[i].flags_enable == 0)
                        continue;
                if (xinfo->xtract[i].use_hash)
                        continue;

                lf_cfg = pst->npc->prx_lfcfg[i].i;
                if (lf_cfg == lid) {
                        for (j = 0; j < NPC_MAX_LFL; j++) {
                                lfinfo = pst->npc->prx_fxcfg[intf][i][j].xtract;
                                rc = npc_update_extraction_data(pst, info,
                                                                &lfinfo[0]);
                                if (rc != 0)
                                        return rc;

                                /* Remember which flag variant is active. */
                                if (lfinfo[0].enable)
                                        pst->flags[lid] = j;
                        }
                }
        }

done:
        /* Record this pattern for flow dump and advance to the next. */
        dump = &pst->flow->dump_data[pst->flow->num_patterns++];
        dump->lid = lid;
        dump->ltype = lt;
        pst->pattern++;
        return 0;
}
482
/* Write and enable MCAM entry 'mcam_id' for 'flow' with an all-zero
 * key/mask except for the interface match: RX entries match the
 * channel in the low 12 bits of KW0, TX entries match the PF_FUNC in
 * bits 32-47 of KW0.
 *
 * Returns 0 on success, -ENOSPC when no mbox message could be
 * allocated, or the mbox error code.
 */
int
npc_mcam_init(struct npc *npc, struct roc_npc_flow *flow, int mcam_id)
{
        struct npc_mcam_write_entry_req *req;
        struct npc_mcam_write_entry_rsq *rsp; /* 'rsq' spelling comes from the mbox API */
        struct mbox *mbox = mbox_get(npc->mbox);
        int rc = 0, idx;

        req = mbox_alloc_msg_npc_mcam_write_entry(mbox);
        if (req == NULL) {
                rc = -ENOSPC;
                goto exit;
        }
        /* No counter attached at init time. */
        req->set_cntr = 0;
        req->cntr = 0;
        req->entry = mcam_id;

        req->intf = (flow->nix_intf == NIX_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
        req->enable_entry = 1;
        req->entry_data.action = flow->npc_action;
        req->entry_data.vtag_action = flow->vtag_action;

        /* Start from an all-zero key and mask. */
        for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
                req->entry_data.kw[idx] = 0x0;
                req->entry_data.kw_mask[idx] = 0x0;
        }

        if (flow->nix_intf == NIX_INTF_RX) {
                /* RX: match the channel in the low 12 bits of KW0. */
                req->entry_data.kw[0] |= (uint64_t)npc->channel;
                req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1);
        } else {
                /* TX: match the big-endian PF_FUNC taken from bits 4-19
                 * of the NPC action, placed at bits 32-47 of KW0.
                 */
                uint16_t pf_func = (flow->npc_action >> 4) & 0xffff;

                pf_func = plt_cpu_to_be_16(pf_func);
                req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
                req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);
        }

        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc != 0) {
                plt_err("npc: mcam initialisation write failed");
                goto exit;
        }
        rc = 0;
exit:
        mbox_put(mbox);
        return rc;
}
531
532 int
npc_mcam_move(struct mbox * mbox,uint16_t old_ent,uint16_t new_ent)533 npc_mcam_move(struct mbox *mbox, uint16_t old_ent, uint16_t new_ent)
534 {
535 struct npc_mcam_shift_entry_req *req;
536 struct npc_mcam_shift_entry_rsp *rsp;
537 int rc = -ENOSPC;
538
539 /* Old entry is disabled & it's contents are moved to new_entry,
540 * new entry is enabled finally.
541 */
542 req = mbox_alloc_msg_npc_mcam_shift_entry(mbox_get(mbox));
543 if (req == NULL)
544 goto exit;
545 req->curr_entry[0] = old_ent;
546 req->new_entry[0] = new_ent;
547 req->shift_count = 1;
548
549 rc = mbox_process_msg(mbox, (void *)&rsp);
550 if (rc)
551 goto exit;
552
553 rc = 0;
554 exit:
555 mbox_put(mbox);
556 return rc;
557 }
558
/* Direction in which existing MCAM entries are slid to make room for a
 * new entry at the user-requested priority level.
 */
enum SHIFT_DIR {
        SLIDE_ENTRIES_TO_LOWER_INDEX,
        SLIDE_ENTRIES_TO_HIGHER_INDEX,
};
563
/* Slide the flows of priority level 'prio' one MCAM slot at a time in
 * direction 'dir', starting from the free slot *free_mcam_id: each
 * moved flow's rule is first re-initialised at the destination
 * (npc_mcam_init) and then shifted in hardware (npc_mcam_move), so the
 * free slot ripples through the level. On return *free_mcam_id holds
 * the slot freed at the far end.
 *
 * Returns 0 on success or the first mbox error encountered.
 */
static int
npc_slide_mcam_entries(struct mbox *mbox, struct npc *npc, int prio,
                       uint16_t *free_mcam_id, int dir)
{
        uint16_t to_mcam_id = 0, from_mcam_id = 0;
        struct npc_prio_flow_list_head *list;
        struct npc_prio_flow_entry *curr = 0;
        int rc = 0;

        list = &npc->prio_flow_list[prio];

        to_mcam_id = *free_mcam_id;
        /* Walk from the end nearest the free slot: highest mcam_id
         * first when sliding up, lowest first when sliding down.
         */
        if (dir == SLIDE_ENTRIES_TO_HIGHER_INDEX)
                curr = TAILQ_LAST(list, npc_prio_flow_list_head);
        else if (dir == SLIDE_ENTRIES_TO_LOWER_INDEX)
                curr = TAILQ_FIRST(list);

        while (curr) {
                from_mcam_id = curr->flow->mcam_id;
                /* Only move entries that sit on the wrong side of the
                 * free slot for the requested direction.
                 */
                if ((dir == SLIDE_ENTRIES_TO_HIGHER_INDEX &&
                     from_mcam_id < to_mcam_id) ||
                    (dir == SLIDE_ENTRIES_TO_LOWER_INDEX &&
                     from_mcam_id > to_mcam_id)) {
                        /* Newly allocated entry and the source entry given to
                         * npc_mcam_shift_entry_req will be in disabled state.
                         * Initialise and enable before moving an entry into
                         * this mcam.
                         */
                        rc = npc_mcam_init(npc, curr->flow, to_mcam_id);
                        if (rc)
                                return rc;
                        rc = npc_mcam_move(mbox, from_mcam_id, to_mcam_id);
                        if (rc)
                                return rc;
                        curr->flow->mcam_id = to_mcam_id;
                        to_mcam_id = from_mcam_id;
                }

                if (dir == SLIDE_ENTRIES_TO_HIGHER_INDEX)
                        curr = TAILQ_PREV(curr, npc_prio_flow_list_head, next);
                else if (dir == SLIDE_ENTRIES_TO_LOWER_INDEX)
                        curr = TAILQ_NEXT(curr, next);
        }

        *free_mcam_id = from_mcam_id;

        return 0;
}
612
613 /*
614 * The mcam_alloc request is first made with NPC_MCAM_LOWER_PRIO with the last
615 * entry in the requested priority level as the reference entry. If it fails,
616 * the alloc request is retried with NPC_MCAM_HIGHER_PRIO with the first entry
617 * in the next lower priority level as the reference entry. After obtaining
618 * the free MCAM from kernel, we check if it is at the right user requested
619 * priority level. If not, the flow rules are moved across MCAM entries till
620 * the user requested priority levels are met.
621 * The MCAM sorting algorithm works as below.
622 * For any given free MCAM obtained from the kernel, there are 3 possibilities.
623 * Case 1:
624 * There are entries belonging to higher user priority level (numerically
625 * lesser) in higher mcam indices. In this case, the entries with higher user
626 * priority are slided towards lower indices and a free entry is created in the
627 * higher indices.
628 * Example:
629 * Assume free entry = 1610, user requested priority = 2 and
630 * max user priority levels = 5 with below entries in respective priority
631 * levels.
632 * 0: 1630, 1635, 1641
633 * 1: 1646, 1650, 1651
634 * 2: 1652, 1655, 1660
635 * 3: 1661, 1662, 1663, 1664
636 * 4: 1665, 1667, 1670
637 *
 * Entries (1630, 1635, 1641, 1646, 1650, 1651) have to be slid down towards
 * lower indices.
 * The shifting sequence will be as below:
 * 1610 <- 1630 <- 1635 <- 1641 <- 1646 <- 1650 <- 1651
 * Entry 1651 will be freed for writing the new flow. This entry will now
 * become the head of priority level 2.
644 *
645 * Case 2:
646 * There are entries belonging to lower user priority level (numerically
647 * bigger) in lower mcam indices. In this case, the entries with lower user
648 * priority are slided towards higher indices and a free entry is created in the
649 * lower indices.
650 *
651 * Example:
652 * free entry = 1653, user requested priority = 0
653 * 0: 1630, 1635, 1641
654 * 1: 1646, 1650, 1651
655 * 2: 1652, 1655, 1660
656 * 3: 1661, 1662, 1663, 1664
657 * 4: 1665, 1667, 1670
658 *
 * Entries (1646, 1650, 1651, 1652) have to be slid up towards higher
 * indices.
 * The shifting sequence will be as below:
 * 1646 -> 1650 -> 1651 -> 1652 -> 1653
 * Entry 1646 will be freed for writing the new flow. This entry will now
 * become the last element in priority level 0.
665 *
666 * Case 3:
667 * Free mcam is at the right place, ie, all higher user priority level
668 * mcams lie in lower indices and all lower user priority level mcams lie in
669 * higher mcam indices.
670 *
671 * The priority level lists are scanned first for case (1) and if the
672 * condition is found true, case(2) is skipped because they are mutually
673 * exclusive. For example, consider below state.
674 * 0: 1630, 1635, 1641
675 * 1: 1646, 1650, 1651
676 * 2: 1652, 1655, 1660
677 * 3: 1661, 1662, 1663, 1664
678 * 4: 1665, 1667, 1670
679 * free entry = 1610, user requested priority = 2
680 *
681 * Case 1: Here the condition is;
682 * "if (requested_prio > prio_idx && free_mcam < tail->flow->mcam_id ){}"
683 * If this condition is true, it means at some higher priority level than
684 * requested priority level, there are entries at lower indices than the given
685 * free mcam. That is, we have found in levels 0,1 there is an mcam X which is
686 * greater than 1610.
687 * If, for any free entry and user req prio, the above condition is true, then
688 * the below case(2) condition will always be false since the lists are kept
689 * sorted. The case(2) condition is;
690 * "if (requested_prio < prio_idx && free_mcam > head->flow->mcam_id){}"
691 * There can't be entries at lower indices at priority level higher
692 * than the requested priority level. That is, here, at levels 3 & 4 there
693 * cannot be any entry greater than 1610. Because all entries in 3 & 4 must be
694 * greater than X which was found to be greater than 1610 earlier.
695 */
696
/* Given a free MCAM entry just obtained from the kernel (rsp->entry),
 * slide existing rules so that all rules of higher user priority end
 * up at lower MCAM indices and vice versa; see the case analysis in
 * the block comment above. On success rsp->entry is updated to the
 * slot the new flow should occupy.
 */
static int
npc_sort_mcams_by_user_prio_level(struct mbox *mbox,
                                  struct npc_prio_flow_entry *flow_list_entry,
                                  struct npc *npc,
                                  struct npc_mcam_alloc_entry_rsp *rsp)
{
        int requested_prio = flow_list_entry->flow->priority;
        struct npc_prio_flow_entry *head, *tail;
        struct npc_prio_flow_list_head *list;
        uint16_t free_mcam = rsp->entry;
        bool do_reverse_scan = true;
        int prio_idx = 0, rc = 0;

        /* Case 1: forward scan over levels of higher user priority. */
        while (prio_idx <= npc->flow_max_priority - 1) {
                list = &npc->prio_flow_list[prio_idx];
                tail = TAILQ_LAST(list, npc_prio_flow_list_head);

                /* requested priority is lower than current level
                 * ie, numerically req prio is higher
                 */
                if ((requested_prio > prio_idx) && tail) {
                        /* but there are some mcams in current level
                         * at higher indices, ie, at priority lower
                         * than free_mcam.
                         */
                        if (free_mcam < tail->flow->mcam_id) {
                                rc = npc_slide_mcam_entries(
                                        mbox, npc, prio_idx, &free_mcam,
                                        SLIDE_ENTRIES_TO_LOWER_INDEX);
                                if (rc)
                                        return rc;
                                /* Cases 1 and 2 are mutually exclusive
                                 * (see comment above): skip reverse scan.
                                 */
                                do_reverse_scan = false;
                        }
                }
                prio_idx++;
        }

        /* Case 2: reverse scan over levels of lower user priority. */
        prio_idx = npc->flow_max_priority - 1;
        while (prio_idx && do_reverse_scan) {
                list = &npc->prio_flow_list[prio_idx];
                head = TAILQ_FIRST(list);

                /* requested priority is higher than current level
                 * ie, numerically req prio is lower
                 */
                if (requested_prio < prio_idx && head) {
                        /* but free mcam is higher than lowest priority
                         * mcam in current level
                         */
                        if (free_mcam > head->flow->mcam_id) {
                                rc = npc_slide_mcam_entries(
                                        mbox, npc, prio_idx, &free_mcam,
                                        SLIDE_ENTRIES_TO_HIGHER_INDEX);
                                if (rc)
                                        return rc;
                        }
                }
                prio_idx--;
        }
        rsp->entry = free_mcam;
        return rc;
}
759
760 static void
npc_insert_into_flow_list(struct npc * npc,struct npc_prio_flow_entry * entry)761 npc_insert_into_flow_list(struct npc *npc, struct npc_prio_flow_entry *entry)
762 {
763 struct npc_prio_flow_list_head *list;
764 struct npc_prio_flow_entry *curr;
765
766 list = &npc->prio_flow_list[entry->flow->priority];
767 curr = TAILQ_FIRST(list);
768
769 if (curr) {
770 while (curr) {
771 if (entry->flow->mcam_id > curr->flow->mcam_id)
772 curr = TAILQ_NEXT(curr, next);
773 else
774 break;
775 }
776 if (curr)
777 TAILQ_INSERT_BEFORE(curr, entry, next);
778 else
779 TAILQ_INSERT_TAIL(list, entry, next);
780 } else {
781 TAILQ_INSERT_HEAD(list, entry, next);
782 }
783 }
784
785 static int
npc_allocate_mcam_entry(struct mbox * mbox,int prio,struct npc_mcam_alloc_entry_rsp * rsp_local,int ref_entry)786 npc_allocate_mcam_entry(struct mbox *mbox, int prio,
787 struct npc_mcam_alloc_entry_rsp *rsp_local,
788 int ref_entry)
789 {
790 struct npc_mcam_alloc_entry_rsp *rsp_cmd;
791 struct npc_mcam_alloc_entry_req *req;
792 struct npc_mcam_alloc_entry_rsp *rsp;
793 int rc = -ENOSPC;
794
795 req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox_get(mbox));
796 if (req == NULL)
797 goto exit;
798 req->contig = 1;
799 req->count = 1;
800 req->priority = prio;
801 req->ref_entry = ref_entry;
802
803 rc = mbox_process_msg(mbox, (void *)&rsp_cmd);
804 if (rc)
805 goto exit;
806
807 if (!rsp_cmd->count) {
808 rc = -ENOSPC;
809 goto exit;
810 }
811
812 mbox_memcpy(rsp_local, rsp_cmd, sizeof(*rsp));
813
814 rc = 0;
815 exit:
816 mbox_put(mbox);
817 return rc;
818 }
819
820 static void
npc_find_mcam_ref_entry(struct roc_npc_flow * flow,struct npc * npc,int * prio,int * ref_entry,int dir)821 npc_find_mcam_ref_entry(struct roc_npc_flow *flow, struct npc *npc, int *prio,
822 int *ref_entry, int dir)
823 {
824 struct npc_prio_flow_entry *head, *tail;
825 struct npc_prio_flow_list_head *list;
826 int prio_idx = flow->priority;
827
828 if (dir == NPC_MCAM_LOWER_PRIO) {
829 while (prio_idx >= 0) {
830 list = &npc->prio_flow_list[prio_idx];
831 head = TAILQ_FIRST(list);
832 if (head) {
833 *prio = NPC_MCAM_LOWER_PRIO;
834 *ref_entry = head->flow->mcam_id;
835 return;
836 }
837 prio_idx--;
838 }
839 } else if (dir == NPC_MCAM_HIGHER_PRIO) {
840 prio_idx = flow->priority;
841 while (prio_idx <= npc->flow_max_priority - 1) {
842 list = &npc->prio_flow_list[prio_idx];
843 tail = TAILQ_LAST(list, npc_prio_flow_list_head);
844 if (tail) {
845 *prio = NPC_MCAM_HIGHER_PRIO;
846 *ref_entry = tail->flow->mcam_id;
847 return;
848 }
849 prio_idx++;
850 }
851 }
852 *prio = NPC_MCAM_ANY_PRIO;
853 *ref_entry = 0;
854 }
855
856 static int
npc_alloc_mcam_by_ref_entry(struct mbox * mbox,struct roc_npc_flow * flow,struct npc * npc,struct npc_mcam_alloc_entry_rsp * rsp_local)857 npc_alloc_mcam_by_ref_entry(struct mbox *mbox, struct roc_npc_flow *flow,
858 struct npc *npc,
859 struct npc_mcam_alloc_entry_rsp *rsp_local)
860 {
861 int prio, ref_entry = 0, rc = 0, dir = NPC_MCAM_LOWER_PRIO;
862 bool retry_done = false;
863
864 retry:
865 npc_find_mcam_ref_entry(flow, npc, &prio, &ref_entry, dir);
866 rc = npc_allocate_mcam_entry(mbox, prio, rsp_local, ref_entry);
867 if (rc && !retry_done) {
868 plt_npc_dbg(
869 "npc: Failed to allocate lower priority entry. Retrying for higher priority");
870
871 dir = NPC_MCAM_HIGHER_PRIO;
872 retry_done = true;
873 goto retry;
874 } else if (rc && retry_done) {
875 return rc;
876 }
877
878 return 0;
879 }
880
881 int
npc_get_free_mcam_entry(struct mbox * mbox,struct roc_npc_flow * flow,struct npc * npc)882 npc_get_free_mcam_entry(struct mbox *mbox, struct roc_npc_flow *flow,
883 struct npc *npc)
884 {
885 struct npc_mcam_alloc_entry_rsp rsp_local;
886 struct npc_prio_flow_entry *new_entry;
887 int rc = 0;
888
889 rc = npc_alloc_mcam_by_ref_entry(mbox, flow, npc, &rsp_local);
890
891 if (rc)
892 return rc;
893
894 new_entry = plt_zmalloc(sizeof(*new_entry), 0);
895 if (!new_entry)
896 return -ENOSPC;
897
898 new_entry->flow = flow;
899
900 plt_npc_dbg("kernel allocated MCAM entry %d", rsp_local.entry);
901
902 rc = npc_sort_mcams_by_user_prio_level(mbox, new_entry, npc,
903 &rsp_local);
904 if (rc)
905 goto err;
906
907 plt_npc_dbg("allocated MCAM entry after sorting %d", rsp_local.entry);
908 flow->mcam_id = rsp_local.entry;
909 npc_insert_into_flow_list(npc, new_entry);
910
911 return rsp_local.entry;
912 err:
913 plt_free(new_entry);
914 return rc;
915 }
916
917 void
npc_delete_prio_list_entry(struct npc * npc,struct roc_npc_flow * flow)918 npc_delete_prio_list_entry(struct npc *npc, struct roc_npc_flow *flow)
919 {
920 struct npc_prio_flow_list_head *list;
921 struct npc_prio_flow_entry *curr;
922
923 list = &npc->prio_flow_list[flow->priority];
924 curr = TAILQ_FIRST(list);
925
926 if (!curr)
927 return;
928
929 while (curr) {
930 if (flow->mcam_id == curr->flow->mcam_id) {
931 TAILQ_REMOVE(list, curr, next);
932 plt_free(curr);
933 break;
934 }
935 curr = TAILQ_NEXT(curr, next);
936 }
937 }
938