/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2024 Marvell.
 */

#include <fcntl.h>
#include <unistd.h>

#include <cnxk_flow.h>
#include <cnxk_rep.h>
#include <cnxk_rep_msg.h>

#define DEFAULT_DUMP_FILE_NAME "/tmp/fdump"
#define MAX_BUFFER_SIZE 1500

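/* Per-action-type configuration sizes used when serializing flow actions */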
const struct cnxk_rte_flow_action_info action_info[] = {
	[RTE_FLOW_ACTION_TYPE_MARK] = {sizeof(struct rte_flow_action_mark)},
	[RTE_FLOW_ACTION_TYPE_VF] = {sizeof(struct rte_flow_action_vf)},
	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = {sizeof(struct rte_flow_action_port_id)},
	[RTE_FLOW_ACTION_TYPE_PORT_ID] = {sizeof(struct rte_flow_action_port_id)},
	[RTE_FLOW_ACTION_TYPE_QUEUE] = {sizeof(struct rte_flow_action_queue)},
	[RTE_FLOW_ACTION_TYPE_RSS] = {sizeof(struct rte_flow_action_rss)},
	[RTE_FLOW_ACTION_TYPE_SECURITY] = {sizeof(struct rte_flow_action_security)},
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = {sizeof(struct rte_flow_action_of_set_vlan_vid)},
	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = {sizeof(struct rte_flow_action_of_push_vlan)},
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = {sizeof(struct rte_flow_action_of_set_vlan_pcp)},
	[RTE_FLOW_ACTION_TYPE_METER] = {sizeof(struct rte_flow_action_meter)},
	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = {sizeof(struct rte_flow_action_vxlan_encap)},
	[RTE_FLOW_ACTION_TYPE_COUNT] = {sizeof(struct rte_flow_action_count)},
};

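/* Count the flow items and actions in a request, including the terminating
 * END entries.
 */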
static void
cnxk_flow_params_count(const struct rte_flow_item pattern[], const struct rte_flow_action actions[],
		       uint16_t *n_pattern, uint16_t *n_action)
{
	int i = 0;

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++)
		i++;

	*n_pattern = ++i;
	plt_rep_dbg("Total patterns is %d", *n_pattern);

	i = 0;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		i++;
	*n_action = ++i;
	plt_rep_dbg("Total actions is %d", *n_action);
}

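/* Append the flow attributes to the message buffer under a CNXK_TYPE_ATTR
 * type header.
 */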
static void
populate_attr_data(void *buffer, uint32_t *length, const struct rte_flow_attr *attr)
{
	uint32_t sz = sizeof(struct rte_flow_attr);
	uint32_t len;

	cnxk_rep_msg_populate_type(buffer, length, CNXK_TYPE_ATTR, sz);

	len = *length;
	/* Populate the attribute data */
	rte_memcpy(RTE_PTR_ADD(buffer, len), attr, sz);
	len += sz;

	*length = len;
}

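/* Serialize flow items into pattern_data: each item becomes a
 * cnxk_pattern_hdr_t followed by its spec, last and mask blobs, with sizes
 * taken from the term[] item table. Returns the serialized length in bytes.
 */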
static uint16_t
prepare_pattern_data(const struct rte_flow_item *pattern, uint16_t nb_pattern,
		     uint64_t *pattern_data)
{
	cnxk_pattern_hdr_t hdr;
	uint16_t len = 0;
	int i = 0;

	for (i = 0; i < nb_pattern; i++) {
		/* Populate the pattern type hdr */
		memset(&hdr, 0, sizeof(cnxk_pattern_hdr_t));
		hdr.type = pattern->type;
		if (pattern->spec) {
			hdr.spec_sz = term[pattern->type].item_size;
			hdr.last_sz = 0;
			hdr.mask_sz = term[pattern->type].item_size;
		}

		rte_memcpy(RTE_PTR_ADD(pattern_data, len), &hdr, sizeof(cnxk_pattern_hdr_t));
		len += sizeof(cnxk_pattern_hdr_t);

		/* Copy pattern spec data */
		if (pattern->spec) {
			rte_memcpy(RTE_PTR_ADD(pattern_data, len), pattern->spec,
				   term[pattern->type].item_size);
			len += term[pattern->type].item_size;
		}

		/* Copy pattern last data */
		if (pattern->last) {
			rte_memcpy(RTE_PTR_ADD(pattern_data, len), pattern->last,
				   term[pattern->type].item_size);
			len += term[pattern->type].item_size;
		}

		/* Copy pattern mask data */
		if (pattern->mask) {
			rte_memcpy(RTE_PTR_ADD(pattern_data, len), pattern->mask,
				   term[pattern->type].item_size);
			len += term[pattern->type].item_size;
		}
		pattern++;
	}

	return len;
}

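/* Serialize the flow items into a scratch buffer and append them to the
 * message buffer under a CNXK_TYPE_PATTERN type header.
 */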
static void
populate_pattern_data(void *buffer, uint32_t *length, const struct rte_flow_item *pattern,
		      uint16_t nb_pattern)
{
	uint64_t pattern_data[BUFSIZ];
	uint32_t len;
	uint32_t sz;

	memset(pattern_data, 0, BUFSIZ * sizeof(uint64_t));
	/* Prepare pattern_data */
	sz = prepare_pattern_data(pattern, nb_pattern, pattern_data);

	cnxk_rep_msg_populate_type(buffer, length, CNXK_TYPE_PATTERN, sz);

	len = *length;
	/* Populate the pattern data */
	rte_memcpy(RTE_PTR_ADD(buffer, len), pattern_data, sz);
	len += sz;

	*length = len;
}

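/* Flatten an RSS action configuration (fixed fields, then the key and queue
 * arrays) into rss_action_conf. Pass a NULL destination to only compute the
 * required size. Returns the flattened length in bytes.
 */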
static uint16_t
populate_rss_action_conf(const struct rte_flow_action_rss *conf, void *rss_action_conf)
{
	int len, sz;

	len = sizeof(struct rte_flow_action_rss) - sizeof(conf->key) - sizeof(conf->queue);

	if (rss_action_conf)
		rte_memcpy(rss_action_conf, conf, len);

	if (conf->key) {
		sz = conf->key_len;
		if (rss_action_conf)
			rte_memcpy(RTE_PTR_ADD(rss_action_conf, len), conf->key, sz);
		len += sz;
	}

	if (conf->queue) {
		sz = conf->queue_num * sizeof(conf->queue);
		if (rss_action_conf)
			rte_memcpy(RTE_PTR_ADD(rss_action_conf, len), conf->queue, sz);
		len += sz;
	}

	return len;
}

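/* Flatten a VXLAN encap action configuration: the item count (including END)
 * is stored as a uint64_t, followed by the serialized encapsulation items.
 * Returns the flattened length, or 0 if it would exceed BUFSIZ.
 */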
static uint16_t
populate_vxlan_encap_action_conf(const struct rte_flow_action_vxlan_encap *vxlan_conf,
				 void *vxlan_encap_action_data)
{
	const struct rte_flow_item *pattern;
	uint64_t nb_patterns = 0;
	uint16_t len, sz;

	pattern = vxlan_conf->definition;
	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++)
		nb_patterns++;

	/* +1 for RTE_FLOW_ITEM_TYPE_END */
	nb_patterns++;

	len = sizeof(uint64_t);
	rte_memcpy(vxlan_encap_action_data, &nb_patterns, len);
	pattern = vxlan_conf->definition;
	/* Prepare pattern_data */
	sz = prepare_pattern_data(pattern, nb_patterns, RTE_PTR_ADD(vxlan_encap_action_data, len));

	len += sz;
	if (len > BUFSIZ) {
		plt_err("Incomplete item definition loaded, len %d", len);
		return 0;
	}

	return len;
}

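/* Serialize flow actions into action_data: each action becomes a
 * cnxk_action_hdr_t followed by its flattened configuration; RSS and VXLAN
 * encap configurations need their nested pointers flattened first. Returns
 * the serialized length in bytes, or 0 on failure.
 */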
static uint16_t
prepare_action_data(const struct rte_flow_action *action, uint16_t nb_action, uint64_t *action_data)
{
	void *action_conf_data = NULL;
	cnxk_action_hdr_t hdr;
	uint16_t len = 0, sz = 0;
	int i = 0;

	for (i = 0; i < nb_action; i++) {
		if (action->conf) {
			switch (action->type) {
			case RTE_FLOW_ACTION_TYPE_RSS:
				sz = populate_rss_action_conf(action->conf, NULL);
				action_conf_data = plt_zmalloc(sz, 0);
				if (populate_rss_action_conf(action->conf, action_conf_data) !=
				    sz) {
					plt_err("Populating RSS action config failed");
					return 0;
				}
				break;
			case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
				action_conf_data = plt_zmalloc(BUFSIZ, 0);
				sz = populate_vxlan_encap_action_conf(action->conf,
								      action_conf_data);
				if (!sz) {
					plt_err("Populating vxlan action config failed");
					return 0;
				}
				break;
			default:
				sz = action_info[action->type].conf_size;
				action_conf_data = plt_zmalloc(sz, 0);
				rte_memcpy(action_conf_data, action->conf, sz);
				break;
			};
		}

		/* Populate the action type hdr */
		memset(&hdr, 0, sizeof(cnxk_action_hdr_t));
		hdr.type = action->type;
		hdr.conf_sz = sz;

		rte_memcpy(RTE_PTR_ADD(action_data, len), &hdr, sizeof(cnxk_action_hdr_t));
		len += sizeof(cnxk_action_hdr_t);

		/* Copy action conf data */
		if (action_conf_data) {
			rte_memcpy(RTE_PTR_ADD(action_data, len), action_conf_data, sz);
			len += sz;
			plt_free(action_conf_data);
			action_conf_data = NULL;
		}

		action++;
	}

	return len;
}

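/* Serialize the flow actions into a scratch buffer and append them to the
 * message buffer under a CNXK_TYPE_ACTION type header.
 */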
static void
populate_action_data(void *buffer, uint32_t *length, const struct rte_flow_action *action,
		     uint16_t nb_action)
{
	uint64_t action_data[BUFSIZ];
	uint32_t len;
	uint32_t sz;

	memset(action_data, 0, BUFSIZ * sizeof(uint64_t));
	/* Prepare action_data */
	sz = prepare_action_data(action, nb_action, action_data);

	cnxk_rep_msg_populate_type(buffer, length, CNXK_TYPE_ACTION, sz);

	len = *length;
	/* Populate the action data */
	rte_memcpy(RTE_PTR_ADD(buffer, len), action_data, sz);
	len += sz;

	*length = len;
}

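/* Build a FLOW_DESTROY request for the representor port and send it via
 * cnxk_rep_msg_send_process(), collecting the acknowledgement in adata.
 */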
static int
process_flow_destroy(struct cnxk_rep_dev *rep_dev, void *flow, cnxk_rep_msg_ack_data_t *adata)
{
	cnxk_rep_msg_flow_destroy_meta_t msg_fd_meta;
	uint32_t len = 0, rc;
	void *buffer;
	size_t size;

	/* If representor not representing any active VF, return 0 */
	if (!rep_dev->is_vf_active)
		return 0;

	size = MAX_BUFFER_SIZE;
	buffer = plt_zmalloc(size, 0);
	if (!buffer) {
		plt_err("Failed to allocate mem");
		rc = -ENOMEM;
		goto fail;
	}

	cnxk_rep_msg_populate_header(buffer, &len);

	msg_fd_meta.portid = rep_dev->rep_id;
	msg_fd_meta.flow = (uint64_t)flow;
	plt_rep_dbg("Flow Destroy: flow 0x%" PRIx64 ", portid %d", msg_fd_meta.flow,
		    msg_fd_meta.portid);
	cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_fd_meta,
					   sizeof(cnxk_rep_msg_flow_destroy_meta_t),
					   CNXK_REP_MSG_FLOW_DESTROY);
	cnxk_rep_msg_populate_msg_end(buffer, &len);

	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
	if (rc) {
		plt_err("Failed to process the message, err %d", rc);
		goto fail;
	}

	return 0;
fail:
	return rc;
}

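/* Copy the contents of the default dump file (written by the flow dump
 * handler) into the user supplied file, then remove the default file.
 */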
static int
copy_flow_dump_file(FILE *target)
{
	FILE *source = NULL;
	int pos;
	int ch;

	source = fopen(DEFAULT_DUMP_FILE_NAME, "r");
	if (source == NULL) {
		plt_err("Failed to read default dump file: %s, err %d", DEFAULT_DUMP_FILE_NAME,
			errno);
		return errno;
	}

	fseek(source, 0L, SEEK_END);
	pos = ftell(source);
	fseek(source, 0L, SEEK_SET);
	while (pos--) {
		ch = fgetc(source);
		fputc(ch, target);
	}

	fclose(source);

	/* Remove the default file after reading */
	remove(DEFAULT_DUMP_FILE_NAME);

	return 0;
}

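/* Build and send a FLOW_DUMP request; the is_stdout flag tells the handler
 * whether to dump to stdout or to the default dump file, which is then
 * copied into the user supplied file.
 */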
static int
process_flow_dump(struct cnxk_rep_dev *rep_dev, struct rte_flow *flow, FILE *file,
		  cnxk_rep_msg_ack_data_t *adata)
{
	cnxk_rep_msg_flow_dump_meta_t msg_fp_meta;
	uint32_t len = 0, rc;
	void *buffer;
	size_t size;

	size = MAX_BUFFER_SIZE;
	buffer = plt_zmalloc(size, 0);
	if (!buffer) {
		plt_err("Failed to allocate mem");
		rc = -ENOMEM;
		goto fail;
	}

	cnxk_rep_msg_populate_header(buffer, &len);

	msg_fp_meta.portid = rep_dev->rep_id;
	msg_fp_meta.flow = (uint64_t)flow;
	msg_fp_meta.is_stdout = (file == stdout) ? 1 : 0;

	plt_rep_dbg("Flow Dump: flow 0x%" PRIx64 ", portid %d stdout %d", msg_fp_meta.flow,
		    msg_fp_meta.portid, msg_fp_meta.is_stdout);
	cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_fp_meta,
					   sizeof(cnxk_rep_msg_flow_dump_meta_t),
					   CNXK_REP_MSG_FLOW_DUMP);
	cnxk_rep_msg_populate_msg_end(buffer, &len);

	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
	if (rc) {
		plt_err("Failed to process the message, err %d", rc);
		goto fail;
	}

	/* Copy contents from default file to user file */
	if (file != stdout)
		copy_flow_dump_file(file);

	return 0;
fail:
	return rc;
}

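/* Build and send a FLOW_FLUSH request for the representor port. */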
static int
process_flow_flush(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t *adata)
{
	cnxk_rep_msg_flow_flush_meta_t msg_ff_meta;
	uint32_t len = 0, rc;
	void *buffer;
	size_t size;

	size = MAX_BUFFER_SIZE;
	buffer = plt_zmalloc(size, 0);
	if (!buffer) {
		plt_err("Failed to allocate mem");
		rc = -ENOMEM;
		goto fail;
	}

	cnxk_rep_msg_populate_header(buffer, &len);

	msg_ff_meta.portid = rep_dev->rep_id;
	plt_rep_dbg("Flow Flush: portid %d", msg_ff_meta.portid);
	cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_ff_meta,
					   sizeof(cnxk_rep_msg_flow_flush_meta_t),
					   CNXK_REP_MSG_FLOW_FLUSH);
	cnxk_rep_msg_populate_msg_end(buffer, &len);

	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
	if (rc) {
		plt_err("Failed to process the message, err %d", rc);
		goto fail;
	}

	return 0;
fail:
	return rc;
}

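/* Build and send a FLOW_QUERY request carrying the serialized query action;
 * the query result is returned through adata.
 */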
static int
process_flow_query(struct cnxk_rep_dev *rep_dev, struct rte_flow *flow,
		   const struct rte_flow_action *action, void *data, cnxk_rep_msg_ack_data_t *adata)
{
	cnxk_rep_msg_flow_query_meta_t *msg_fq_meta;
	struct rte_flow_query_count *query = data;
	uint32_t len = 0, rc, sz, total_sz;
	uint64_t action_data[BUFSIZ];
	void *buffer;
	size_t size;

	size = MAX_BUFFER_SIZE;
	buffer = plt_zmalloc(size, 0);
	if (!buffer) {
		plt_err("Failed to allocate mem");
		rc = -ENOMEM;
		goto fail;
	}

	cnxk_rep_msg_populate_header(buffer, &len);

	memset(action_data, 0, BUFSIZ * sizeof(uint64_t));
	sz = prepare_action_data(action, 1, action_data);
	total_sz = sz + sizeof(cnxk_rep_msg_flow_query_meta_t);

	msg_fq_meta = plt_zmalloc(total_sz, 0);
	if (!msg_fq_meta) {
		plt_err("Failed to allocate memory");
		rc = -ENOMEM;
		goto fail;
	}

	msg_fq_meta->portid = rep_dev->rep_id;
	msg_fq_meta->reset = query->reset;
	msg_fq_meta->flow = (uint64_t)flow;
	/* Populate the action data */
	rte_memcpy(msg_fq_meta->action_data, action_data, sz);
	msg_fq_meta->action_data_sz = sz;

	plt_rep_dbg("Flow query: flow 0x%" PRIx64 ", portid %d, action type %d total sz %d "
		    "action sz %d", msg_fq_meta->flow, msg_fq_meta->portid, action->type, total_sz,
		    sz);
	cnxk_rep_msg_populate_command_meta(buffer, &len, msg_fq_meta, total_sz,
					   CNXK_REP_MSG_FLOW_QUERY);
	cnxk_rep_msg_populate_msg_end(buffer, &len);

	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
	if (rc) {
		plt_err("Failed to process the message, err %d", rc);
		goto free;
	}

	rte_free(msg_fq_meta);

	return 0;

free:
	rte_free(msg_fq_meta);
fail:
	return rc;
}

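/* Build and send a flow create/validate request: message header, command
 * meta (port id plus item and action counts), then the serialized
 * attributes, items and actions.
 */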
static int
process_flow_rule(struct cnxk_rep_dev *rep_dev, const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[], const struct rte_flow_action actions[],
		  cnxk_rep_msg_ack_data_t *adata, cnxk_rep_msg_t msg)
{
	cnxk_rep_msg_flow_create_meta_t msg_fc_meta;
	uint16_t n_pattern, n_action;
	uint32_t len = 0, rc = 0;
	void *buffer;
	size_t size;

	size = MAX_BUFFER_SIZE;
	buffer = plt_zmalloc(size, 0);
	if (!buffer) {
		plt_err("Failed to allocate mem");
		rc = -ENOMEM;
		goto fail;
	}

	/* Get no of actions and patterns */
	cnxk_flow_params_count(pattern, actions, &n_pattern, &n_action);

	/* Adding the header */
	cnxk_rep_msg_populate_header(buffer, &len);

	/* Representor port identified as rep_xport queue */
	msg_fc_meta.portid = rep_dev->rep_id;
	msg_fc_meta.nb_pattern = n_pattern;
	msg_fc_meta.nb_action = n_action;

	cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_fc_meta,
					   sizeof(cnxk_rep_msg_flow_create_meta_t), msg);

	/* Populate flow create parameters data */
	populate_attr_data(buffer, &len, attr);
	populate_pattern_data(buffer, &len, pattern, n_pattern);
	populate_action_data(buffer, &len, actions, n_action);

	cnxk_rep_msg_populate_msg_end(buffer, &len);

	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
	if (rc) {
		plt_err("Failed to process the message, err %d", rc);
		goto fail;
	}

	return 0;
fail:
	return rc;
}

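/* Native path: create the flow directly on the NPC and shift the resulting
 * MCAM entry to a higher priority than the eswitch exception-path rules.
 */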
static struct rte_flow *
cnxk_rep_flow_create_native(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[], struct rte_flow_error *error)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	struct roc_npc_flow *flow;
	uint16_t new_entry;
	int rc;

	flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, true);
	if (!flow) {
		plt_err("Failed to create flow");
		goto fail;
	}

	/* Shifting the rules with higher priority than exception path rules */
	new_entry = (uint16_t)flow->mcam_id;
	rc = cnxk_eswitch_flow_rule_shift(rep_dev->hw_func, &new_entry);
	if (rc) {
		plt_err("Failed to shift the flow rule entry, err %d", rc);
		goto fail;
	}

	flow->mcam_id = new_entry;

	return (struct rte_flow *)flow;
fail:
	return NULL;
}

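/* rte_flow create callback: use the native path when rep_dev->native_repte is
 * set, otherwise forward the request over the representor message channel and
 * translate the acknowledgement.
 */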
static struct rte_flow *
cnxk_rep_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[], const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	struct rte_flow *flow = NULL;
	cnxk_rep_msg_ack_data_t adata;
	int rc = 0;

	/* If representor not representing any active VF, return NULL */
	if (!rep_dev->is_vf_active) {
		rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Represented VF not active yet");
		return NULL;
	}

	if (rep_dev->native_repte)
		return cnxk_rep_flow_create_native(eth_dev, attr, pattern, actions, error);

	rc = process_flow_rule(rep_dev, attr, pattern, actions, &adata, CNXK_REP_MSG_FLOW_CREATE);
	if (!rc || adata.u.sval < 0) {
		if (adata.u.sval < 0) {
			rc = (int)adata.u.sval;
			rte_flow_error_set(error, adata.u.sval, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL, "Failed to create flow");
			goto fail;
		}

		flow = adata.u.data;
		if (!flow) {
			rte_flow_error_set(error, adata.u.sval, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL, "Failed to create flow");
			goto fail;
		}
	} else {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to create flow");
		goto fail;
	}
	plt_rep_dbg("Flow %p created successfully", adata.u.data);

	return flow;
fail:
	return NULL;
}

static int
cnxk_rep_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[], const struct rte_flow_action actions[],
		       struct rte_flow_error *error)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	cnxk_rep_msg_ack_data_t adata;
	int rc = 0;

	/* If representor not representing any active VF, return 0 */
	if (!rep_dev->is_vf_active) {
		rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Represented VF not active yet");
		return 0;
	}

	if (rep_dev->native_repte)
		return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, true);

	rc = process_flow_rule(rep_dev, attr, pattern, actions, &adata, CNXK_REP_MSG_FLOW_VALIDATE);
	if (!rc || adata.u.sval < 0) {
		if (adata.u.sval < 0) {
			rc = (int)adata.u.sval;
			rte_flow_error_set(error, adata.u.sval, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL, "Failed to validate flow");
			goto fail;
		}
	} else {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to validate flow");
	}

	plt_rep_dbg("Flow %p validated successfully", adata.u.data);

fail:
	return rc;
}

static int
cnxk_rep_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
		      struct rte_flow_error *error)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	cnxk_rep_msg_ack_data_t adata;
	int rc;

	/* If representor not representing any active VF, return 0 */
	if (!rep_dev->is_vf_active) {
		rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Represented VF not active yet");
		return 0;
	}

	if (rep_dev->native_repte)
		return cnxk_flow_destroy_common(eth_dev, (struct roc_npc_flow *)flow, error, true);

	rc = process_flow_destroy(rep_dev, flow, &adata);
	if (rc || adata.u.sval < 0) {
		if (adata.u.sval < 0)
			rc = adata.u.sval;

		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to destroy flow");
		goto fail;
	}

	return 0;
fail:
	return rc;
}

static int
cnxk_rep_flow_query(struct rte_eth_dev *eth_dev, struct rte_flow *flow,
		    const struct rte_flow_action *action, void *data, struct rte_flow_error *error)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	cnxk_rep_msg_ack_data_t adata;
	int rc;

	/* If representor not representing any active VF, return 0 */
	if (!rep_dev->is_vf_active) {
		rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Represented VF not active yet");
		return 0;
	}

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
		rc = -ENOTSUP;
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Only COUNT is supported in query");
		goto fail;
	}

	if (rep_dev->native_repte)
		return cnxk_flow_query_common(eth_dev, flow, action, data, error, true);

	rc = process_flow_query(rep_dev, flow, action, data, &adata);
	if (rc || adata.u.sval < 0) {
		if (adata.u.sval < 0)
			rc = adata.u.sval;

		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to query the flow");
		goto fail;
	}

	rte_memcpy(data, adata.u.data, adata.size);

	return 0;
fail:
	return rc;
}

static int
cnxk_rep_flow_flush(struct rte_eth_dev *eth_dev, struct rte_flow_error *error)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	cnxk_rep_msg_ack_data_t adata;
	int rc;

	/* If representor not representing any active VF, return 0 */
	if (!rep_dev->is_vf_active) {
		rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Represented VF not active yet");
		return 0;
	}

	if (rep_dev->native_repte)
		return cnxk_flow_flush_common(eth_dev, error, true);

	rc = process_flow_flush(rep_dev, &adata);
	if (rc || adata.u.sval < 0) {
		if (adata.u.sval < 0)
			rc = adata.u.sval;

		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to flush flows");
		goto fail;
	}

	return 0;
fail:
	return rc;
}

static int
cnxk_rep_flow_dev_dump(struct rte_eth_dev *eth_dev, struct rte_flow *flow, FILE *file,
		       struct rte_flow_error *error)
{
	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
	cnxk_rep_msg_ack_data_t adata;
	int rc;

	/* If representor not representing any active VF, return 0 */
	if (!rep_dev->is_vf_active) {
		rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Represented VF not active yet");
		return 0;
	}

	if (rep_dev->native_repte)
		return cnxk_flow_dev_dump_common(eth_dev, flow, file, error, true);

	rc = process_flow_dump(rep_dev, flow, file, &adata);
	if (rc || adata.u.sval < 0) {
		if (adata.u.sval < 0)
			rc = adata.u.sval;

		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to dump flow");
		goto fail;
	}

	return 0;
fail:
	return rc;
}

static int
cnxk_rep_flow_isolate(struct rte_eth_dev *eth_dev __rte_unused, int enable __rte_unused,
		      struct rte_flow_error *error)
{
	/* If isolation were supported, the default MCAM entry for this port
	 * would need to be uninstalled here.
	 */

	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Flow isolation not supported");

	return -rte_errno;
}

struct rte_flow_ops cnxk_rep_flow_ops = {
	.validate = cnxk_rep_flow_validate,
	.create = cnxk_rep_flow_create,
	.destroy = cnxk_rep_flow_destroy,
	.query = cnxk_rep_flow_query,
	.flush = cnxk_rep_flow_flush,
	.isolate = cnxk_rep_flow_isolate,
	.dev_dump = cnxk_rep_flow_dev_dump,
};