/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <string.h>

#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "rte_regexdev.h"
#include "rte_regexdev_core.h"
#include "rte_regexdev_driver.h"

static const char *MZ_RTE_REGEXDEV_DATA = "rte_regexdev_data";
struct rte_regexdev rte_regex_devices[RTE_MAX_REGEXDEV_DEVS];
/* Shared memory between primary and secondary processes. */
static struct {
	struct rte_regexdev_data data[RTE_MAX_REGEXDEV_DEVS];
} *rte_regexdev_shared_data;

RTE_LOG_REGISTER_DEFAULT(rte_regexdev_logtype, INFO);

static uint16_t
regexdev_find_free_dev(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
		if (rte_regex_devices[i].state == RTE_REGEXDEV_UNUSED)
			return i;
	}
	return RTE_MAX_REGEXDEV_DEVS;
}

static struct rte_regexdev*
regexdev_allocated(const char *name)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
		if (rte_regex_devices[i].state != RTE_REGEXDEV_UNUSED)
			if (!strcmp(name, rte_regex_devices[i].data->dev_name))
				return &rte_regex_devices[i];
	}
	return NULL;
}

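/*
 * Reserve (once) the memzone that backs the shared rte_regexdev_data
 * records declared above; subsequent calls simply reuse the mapping.
 */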
static int
regexdev_shared_data_prepare(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_regexdev_shared_data == NULL) {
		/* Allocate port data and ownership shared memory. */
		mz = rte_memzone_reserve(MZ_RTE_REGEXDEV_DATA,
				sizeof(*rte_regexdev_shared_data),
				rte_socket_id(), flags);
		if (mz == NULL)
			return -ENOMEM;

		rte_regexdev_shared_data = mz->addr;
		memset(rte_regexdev_shared_data->data, 0,
		       sizeof(rte_regexdev_shared_data->data));
	}
	return 0;
}

static int
regexdev_check_name(const char *name)
{
	size_t name_len;

	if (name == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Name can't be NULL");
		return -EINVAL;
	}
	name_len = strnlen(name, RTE_REGEXDEV_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Zero length RegEx device name");
		return -EINVAL;
	}
	if (name_len >= RTE_REGEXDEV_NAME_MAX_LEN) {
		RTE_REGEXDEV_LOG_LINE(ERR, "RegEx device name is too long");
		return -EINVAL;
	}
	return (int)name_len;
}

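/*
 * Driver-facing helper declared in rte_regexdev_driver.h: a PMD is expected
 * to call this (typically at probe time) to claim a free device slot, bind
 * it to the shared data area and record the device name. The matching
 * rte_regexdev_unregister() below releases the slot again.
 */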
struct rte_regexdev *
rte_regexdev_register(const char *name)
{
	uint16_t dev_id;
	int name_len;
	struct rte_regexdev *dev;

	name_len = regexdev_check_name(name);
	if (name_len < 0)
		return NULL;
	dev = regexdev_allocated(name);
	if (dev != NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "RegEx device already allocated");
		return NULL;
	}
	dev_id = regexdev_find_free_dev();
	if (dev_id == RTE_MAX_REGEXDEV_DEVS) {
		RTE_REGEXDEV_LOG_LINE
			(ERR, "Reached maximum number of RegEx devices");
		return NULL;
	}
	if (regexdev_shared_data_prepare() < 0) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Cannot allocate RegEx shared data");
		return NULL;
	}

	dev = &rte_regex_devices[dev_id];
	dev->state = RTE_REGEXDEV_REGISTERED;
	if (dev->data == NULL)
		dev->data = &rte_regexdev_shared_data->data[dev_id];
	else
		memset(dev->data, 0, sizeof(*dev->data));
	dev->data->dev_id = dev_id;
	strlcpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
	return dev;
}

void
rte_regexdev_unregister(struct rte_regexdev *dev)
{
	dev->state = RTE_REGEXDEV_UNUSED;
}

struct rte_regexdev *
rte_regexdev_get_device_by_name(const char *name)
{
	if (regexdev_check_name(name) < 0)
		return NULL;
	return regexdev_allocated(name);
}

uint8_t
rte_regexdev_count(void)
{
	int i;
	int count = 0;

	for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
		if (rte_regex_devices[i].state != RTE_REGEXDEV_UNUSED)
			count++;
	}
	return count;
}

int
rte_regexdev_get_dev_id(const char *name)
{
	int i;
	int id = -EINVAL;

	if (name == NULL)
		return -EINVAL;
	for (i = 0; i < RTE_MAX_REGEXDEV_DEVS; i++) {
		if (rte_regex_devices[i].state != RTE_REGEXDEV_UNUSED)
			if (!strcmp(name, rte_regex_devices[i].data->dev_name)) {
				id = rte_regex_devices[i].data->dev_id;
				break;
			}
	}
	return id;
}

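/*
 * Only devices in the READY state count as valid here: a device that is
 * merely REGISTERED by its PMD is not yet usable through the public API.
 */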
int
rte_regexdev_is_valid_dev(uint16_t dev_id)
{
	if (dev_id >= RTE_MAX_REGEXDEV_DEVS ||
	    rte_regex_devices[dev_id].state != RTE_REGEXDEV_READY)
		return 0;
	return 1;
}

static int
regexdev_info_get(uint8_t dev_id, struct rte_regexdev_info *dev_info)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	if (dev_info == NULL)
		return -EINVAL;
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_info_get == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->dev_info_get)(dev, dev_info);
}

int
rte_regexdev_info_get(uint8_t dev_id, struct rte_regexdev_info *dev_info)
{
	return regexdev_info_get(dev_id, dev_info);
}

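/*
 * Configuration is only allowed while the device is stopped, and every
 * requested flag and resource count below is checked against the limits
 * reported by the driver. An illustrative call sequence (the chosen values
 * are application-specific, not mandated by the API):
 *
 *	struct rte_regexdev_info info;
 *	struct rte_regexdev_config cfg = { 0 };
 *
 *	rte_regexdev_info_get(dev_id, &info);
 *	cfg.nb_queue_pairs = 1;
 *	cfg.nb_groups = 1;
 *	cfg.nb_rules_per_group = info.max_rules_per_group;
 *	cfg.nb_max_matches = info.max_matches;
 *	rte_regexdev_configure(dev_id, &cfg);
 */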
int
rte_regexdev_configure(uint8_t dev_id, const struct rte_regexdev_config *cfg)
{
	struct rte_regexdev *dev;
	struct rte_regexdev_info dev_info;
	int ret;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	if (cfg == NULL)
		return -EINVAL;
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;
	if (dev->data->dev_started) {
		RTE_REGEXDEV_LOG_LINE
			(ERR, "Dev %u must be stopped to allow configuration",
			 dev_id);
		return -EBUSY;
	}
	ret = regexdev_info_get(dev_id, &dev_info);
	if (ret < 0)
		return ret;
	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_CROSS_BUFFER_SCAN_F) &&
	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_CROSS_BUFFER_F)) {
		RTE_REGEXDEV_LOG_LINE(ERR,
			"Dev %u doesn't support cross buffer scan",
			dev_id);
		return -EINVAL;
	}
	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_MATCH_AS_END_F) &&
	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_AS_END_F)) {
		RTE_REGEXDEV_LOG_LINE(ERR,
			"Dev %u doesn't support match as end",
			dev_id);
		return -EINVAL;
	}
	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_MATCH_ALL_F) &&
	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_ALL_F)) {
		RTE_REGEXDEV_LOG_LINE(ERR,
			"Dev %u doesn't support match all",
			dev_id);
		return -EINVAL;
	}
	if (cfg->nb_groups == 0) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of groups must be > 0",
			dev_id);
		return -EINVAL;
	}
	if (cfg->nb_groups > dev_info.max_groups) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of groups %d > %d",
			dev_id, cfg->nb_groups, dev_info.max_groups);
		return -EINVAL;
	}
	if (cfg->nb_max_matches == 0) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of matches must be > 0",
			dev_id);
		return -EINVAL;
	}
	if (cfg->nb_max_matches > dev_info.max_matches) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of matches %d > %d",
			dev_id, cfg->nb_max_matches,
			dev_info.max_matches);
		return -EINVAL;
	}
	if (cfg->nb_queue_pairs == 0) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of queues must be > 0",
			dev_id);
		return -EINVAL;
	}
	if (cfg->nb_queue_pairs > dev_info.max_queue_pairs) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of queues %d > %d",
			dev_id, cfg->nb_queue_pairs,
			dev_info.max_queue_pairs);
		return -EINVAL;
	}
	if (cfg->nb_rules_per_group == 0) {
		RTE_REGEXDEV_LOG_LINE(ERR,
			"Dev %u num of rules per group must be > 0",
			dev_id);
		return -EINVAL;
	}
	if (cfg->nb_rules_per_group > dev_info.max_rules_per_group) {
		RTE_REGEXDEV_LOG_LINE(ERR,
			"Dev %u num of rules per group %d > %d",
			dev_id, cfg->nb_rules_per_group,
			dev_info.max_rules_per_group);
		return -EINVAL;
	}
	ret = (*dev->dev_ops->dev_configure)(dev, cfg);
	if (ret == 0)
		dev->data->dev_conf = *cfg;
	return ret;
}

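/*
 * Queue pairs can only be set up on a stopped device, after
 * rte_regexdev_configure() has fixed nb_queue_pairs; queue_pair_id is
 * validated against that configured count before the driver is called.
 */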
int
rte_regexdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_regexdev_qp_conf *qp_conf)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_qp_setup == NULL)
		return -ENOTSUP;
	if (dev->data->dev_started) {
		RTE_REGEXDEV_LOG_LINE
			(ERR, "Dev %u must be stopped to allow configuration",
			 dev_id);
		return -EBUSY;
	}
	if (queue_pair_id >= dev->data->dev_conf.nb_queue_pairs) {
		RTE_REGEXDEV_LOG_LINE(ERR,
			"Dev %u invalid queue %d > %d",
			dev_id, queue_pair_id,
			dev->data->dev_conf.nb_queue_pairs);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_qp_setup)(dev, queue_pair_id, qp_conf);
}

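/*
 * Start/stop bracket the data path: dev_started is set only when the
 * driver's start callback succeeds, and it is always cleared on stop so
 * the device can be reconfigured afterwards.
 */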
int
rte_regexdev_start(uint8_t dev_id)
{
	struct rte_regexdev *dev;
	int ret;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->dev_start)(dev);
	if (ret == 0)
		dev->data->dev_started = 1;
	return ret;
}

int
rte_regexdev_stop(uint8_t dev_id)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
	return 0;
}

int
rte_regexdev_close(uint8_t dev_id)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	(*dev->dev_ops->dev_close)(dev);
	dev->data->dev_started = 0;
	dev->state = RTE_REGEXDEV_UNUSED;
	return 0;
}

int
rte_regexdev_attr_get(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
		void *attr_value)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_attr_get == NULL)
		return -ENOTSUP;
	if (attr_value == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d attribute value can't be NULL",
			dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_attr_get)(dev, attr_id, attr_value);
}

int
rte_regexdev_attr_set(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
		const void *attr_value)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_attr_set == NULL)
		return -ENOTSUP;
	if (attr_value == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d attribute value can't be NULL",
			dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_attr_set)(dev, attr_id, attr_value);
}

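/*
 * Rule database handling: rules queued with rte_regexdev_rule_db_update()
 * take effect once rte_regexdev_rule_db_compile_activate() is called, while
 * rte_regexdev_rule_db_import()/rte_regexdev_rule_db_export() deal with an
 * already pre-compiled database (see rte_regexdev.h for the full contract).
 */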
int
rte_regexdev_rule_db_update(uint8_t dev_id,
		const struct rte_regexdev_rule *rules,
		uint32_t nb_rules)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_rule_db_update == NULL)
		return -ENOTSUP;
	if (rules == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d rules can't be NULL",
			dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_rule_db_update)(dev, rules, nb_rules);
}

int
rte_regexdev_rule_db_compile_activate(uint8_t dev_id)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_rule_db_compile_activate == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->dev_rule_db_compile_activate)(dev);
}

int
rte_regexdev_rule_db_import(uint8_t dev_id, const char *rule_db,
		uint32_t rule_db_len)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_db_import == NULL)
		return -ENOTSUP;
	if (rule_db == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d rule db can't be NULL",
			dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_db_import)(dev, rule_db, rule_db_len);
}

int
rte_regexdev_rule_db_export(uint8_t dev_id, char *rule_db)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_db_export == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->dev_db_export)(dev, rule_db);
}

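/*
 * Extended statistics: applications are expected to fetch the name/id map
 * with rte_regexdev_xstats_names_get() first, then read or reset counters
 * by id. All of these are optional driver callbacks and return -ENOTSUP
 * when the PMD does not implement them.
 */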
int
rte_regexdev_xstats_names_get(uint8_t dev_id,
		struct rte_regexdev_xstats_map *xstats_map)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_xstats_names_get == NULL)
		return -ENOTSUP;
	if (xstats_map == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d xstats map can't be NULL",
			dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_xstats_names_get)(dev, xstats_map);
}

int
rte_regexdev_xstats_get(uint8_t dev_id, const uint16_t *ids,
		uint64_t *values, uint16_t n)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_xstats_get == NULL)
		return -ENOTSUP;
	if (ids == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d ids can't be NULL", dev_id);
		return -EINVAL;
	}
	if (values == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d values can't be NULL", dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_xstats_get)(dev, ids, values, n);
}

int
rte_regexdev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint16_t *id, uint64_t *value)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_xstats_by_name_get == NULL)
		return -ENOTSUP;
	if (name == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d name can't be NULL", dev_id);
		return -EINVAL;
	}
	if (id == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d id can't be NULL", dev_id);
		return -EINVAL;
	}
	if (value == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d value can't be NULL", dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_xstats_by_name_get)(dev, name, id, value);
}

int
rte_regexdev_xstats_reset(uint8_t dev_id, const uint16_t *ids,
		uint16_t nb_ids)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_xstats_reset == NULL)
		return -ENOTSUP;
	if (ids == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d ids can't be NULL", dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_xstats_reset)(dev, ids, nb_ids);
}

int
rte_regexdev_selftest(uint8_t dev_id)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_selftest == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->dev_selftest)(dev);
}

int
rte_regexdev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_regexdev *dev;

	RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_regex_devices[dev_id];
	if (*dev->dev_ops->dev_dump == NULL)
		return -ENOTSUP;
	if (f == NULL) {
		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d file can't be NULL", dev_id);
		return -EINVAL;
	}
	return (*dev->dev_ops->dev_dump)(dev, f);
}