/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>


/* REE common headers */
#include "cn9k_regexdev.h"
#include "cn9k_regexdev_compiler.h"


/* HW writes matches at offset 0x80 from RES_PTR_ADDR, while in the op
 * structure the matches array starts at W5 (offset 0x28). The tail matches
 * therefore have to be copied into the 88 B span between 0x28 and 0x80.
 * Each match is 8 B, so up to 11 matches can be copied.
 */
#define REE_NUM_MATCHES_ALIGN	11
/* The REE co-processor will write up to 254 job match structures
 * (REE_MATCH_S) starting at address [RES_PTR_ADDR] + 0x80.
 */
#define REE_MATCH_OFFSET	0x80
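
/*
 * For illustration: REE_NUM_MATCHES_ALIGN follows from the two offsets
 * above, assuming each REE_MATCH_S is 8 B as the comment block states:
 *	(REE_MATCH_OFFSET - 0x28) / 8 = (0x80 - 0x28) / 8 = 88 / 8 = 11
 * A compile-time check along the lines of
 *	RTE_BUILD_BUG_ON((REE_MATCH_OFFSET - 0x28) /
 *			 sizeof(union ree_match) != REE_NUM_MATCHES_ALIGN);
 * would pin that relationship, assuming sizeof(union ree_match) == 8.
 */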

#define REE_MAX_RULES_PER_GROUP 0xFFFF
#define REE_MAX_GROUPS 0xFFFF


#define REE_RULE_DB_VERSION	2
#define REE_RULE_DB_REVISION	0

struct ree_rule_db_entry {
	uint8_t		type;
	uint32_t	addr;
	uint64_t	value;
};

struct ree_rule_db {
	uint32_t version;
	uint32_t revision;
	uint32_t number_of_entries;
	struct ree_rule_db_entry entries[];
} __rte_packed;
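
/*
 * For illustration: a serialized rule database, as consumed by the
 * dev_db_import path below, is this header immediately followed by
 * number_of_entries entries, so the total blob size is
 *	sizeof(struct ree_rule_db) +
 *	number_of_entries * sizeof(struct ree_rule_db_entry)
 * (with natural alignment, sizeof(struct ree_rule_db_entry) is typically
 * 16 B: 1 B type, 3 B padding, 4 B addr, 8 B value).
 */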

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "cn9k_ree_lf_mem_%u:%u", dev_id, qp_id);
}

static struct roc_ree_qp *
ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
	struct roc_ree_vf *vf = &data->vf;
	const struct rte_memzone *lf_mem;
	uint32_t len, iq_len, size_div2;
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t used_len, iova;
	struct roc_ree_qp *qp;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc("CN9K Regex PMD Queue Pair", sizeof(*qp),
				ROC_ALIGN);
	if (qp == NULL) {
		cn9k_err("Could not allocate queue pair");
		return NULL;
	}

	iq_len = REE_IQ_LEN;

	/*
	 * Queue size must be in units of 128 B, i.e. 2 * REE_INST_S (each
	 * REE_INST_S is 64 B), and a power of 2.
	 * The effective queue size available to software is (size - 1) * 128 B.
	 */
	size_div2 = iq_len >> 1;

	/* For pending queue */
	len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += REE_IQ_LEN * sizeof(union roc_ree_inst);

	/* Waste after instruction queues */
	len = RTE_ALIGN(len, pg_sz);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp_id);

	lf_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
			RTE_CACHE_LINE_SIZE);
	if (lf_mem == NULL) {
		cn9k_err("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = lf_mem->addr;
	iova = lf_mem->iova;

	memset(va, 0, len);

	/* Initialize pending queue */
	qp->pend_q.rid_queue = (struct roc_ree_rid *)va;
	qp->pend_q.enq_tail = 0;
	qp->pend_q.deq_head = 0;
	qp->pend_q.pending_count = 0;

	used_len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
	used_len = RTE_ALIGN(used_len, pg_sz);
	iova += used_len;

	qp->iq_dma_addr = iova;
	qp->id = qp_id;
	qp->base = roc_ree_qp_get_base(vf, qp_id);
	qp->roc_regexdev_jobid = 0;
	qp->write_offset = 0;

	ret = roc_ree_iq_enable(vf, qp, REE_QUEUE_HI_PRIO, size_div2);
	if (ret) {
		cn9k_err("Could not enable instruction queue");
		goto lf_mem_free;
	}

	return qp;

lf_mem_free:
	rte_memzone_free(lf_mem);
qp_free:
	rte_free(qp);
	return NULL;
}
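
/*
 * For illustration, the memzone reserved by ree_qp_create() is laid out as
 *
 *	[0 .. iq_len * rid size)     pending queue (roc_ree_rid entries)
 *	[page-aligned boundary)      instruction queue of REE_IQ_LEN
 *	                             roc_ree_inst entries
 *	[page-aligned tail padding)
 *
 * Concrete sizes are a sketch only: they depend on REE_IQ_LEN,
 * sizeof(struct roc_ree_rid) and sizeof(union roc_ree_inst), which are
 * defined by the ROC layer, not by this file.
 */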

static int
ree_qp_destroy(const struct rte_regexdev *dev, struct roc_ree_qp *qp)
{
	const struct rte_memzone *lf_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	roc_ree_iq_disable(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp->id);

	lf_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(lf_mem);
	if (ret)
		return ret;

	rte_free(qp);

	return 0;
}

static int
ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	int ret;

	ree_func_trace("Queue=%d", qp_id);

	if (qp == NULL)
		return -EINVAL;

	ret = ree_qp_destroy(dev, qp);
	if (ret) {
		cn9k_err("Could not destroy queue pair %d", qp_id);
		return ret;
	}

	data->queue_pairs[qp_id] = NULL;

	return 0;
}

static struct rte_regexdev *
ree_dev_register(const char *name)
{
	struct rte_regexdev *dev;

	cn9k_ree_dbg("Creating regexdev %s", name);

	/* allocate device structure */
	dev = rte_regexdev_register(name);
	if (dev == NULL) {
		cn9k_err("Failed to allocate regex device for %s", name);
		return NULL;
	}

	/* allocate private device structure */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dev->data->dev_private =
				rte_zmalloc_socket("regexdev device private",
						sizeof(struct cn9k_ree_data),
						RTE_CACHE_LINE_SIZE,
						rte_socket_id());

		if (dev->data->dev_private == NULL) {
			cn9k_err("Cannot allocate memory for dev %s private data",
					name);

			rte_regexdev_unregister(dev);
			return NULL;
		}
	}

	return dev;
}

static int
ree_dev_unregister(struct rte_regexdev *dev)
{
	cn9k_ree_dbg("Closing regex device %s", dev->device->name);

	/* free regex device */
	rte_regexdev_unregister(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(dev->data->dev_private);

	return 0;
}

static int
ree_dev_fini(struct rte_regexdev *dev)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	int i, ret;

	ree_func_trace();

	for (i = 0; i < data->nb_queue_pairs; i++) {
		ret = ree_queue_pair_release(dev, i);
		if (ret)
			return ret;
	}

	ret = roc_ree_queues_detach(vf);
	if (ret)
		cn9k_err("Could not detach queues");

	/* TEMP : should be in lib */
	rte_free(data->queue_pairs);
	rte_free(data->rules);

	roc_ree_dev_fini(vf);

	ret = ree_dev_unregister(dev);
	if (ret)
		cn9k_err("Could not destroy PMD");

	return ret;
}

static inline int
ree_enqueue(struct roc_ree_qp *qp, struct rte_regex_ops *op,
		 struct roc_ree_pending_queue *pend_q)
{
	union roc_ree_inst inst;
	union ree_res *res;
	uint32_t offset;

	if (unlikely(pend_q->pending_count >= REE_DEFAULT_CMD_QLEN)) {
		cn9k_err("Pending count %" PRIu64 " is greater than Q size %d",
				pend_q->pending_count, REE_DEFAULT_CMD_QLEN);
		return -EAGAIN;
	}
	if (unlikely(op->mbuf->data_len > REE_MAX_PAYLOAD_SIZE ||
			op->mbuf->data_len == 0)) {
		cn9k_err("Packet length %d is zero or greater than max payload %d",
				op->mbuf->data_len, REE_MAX_PAYLOAD_SIZE);
		return -EAGAIN;
	}

	/* W 0 */
	inst.cn98xx.ooj = 1;
	inst.cn98xx.dg = 0;
	inst.cn98xx.doneint = 0;
	/* W 1 */
	inst.cn98xx.inp_ptr_addr = rte_pktmbuf_mtod(op->mbuf, uint64_t);
	/* W 2 */
	inst.cn98xx.inp_ptr_ctl = op->mbuf->data_len & 0x7FFF;
	inst.cn98xx.inp_ptr_ctl = inst.cn98xx.inp_ptr_ctl << 32;

	/* W 3 */
	inst.cn98xx.res_ptr_addr = (uint64_t)op;
	/* W 4 */
	inst.cn98xx.wq_ptr = 0;
	/* W 5 */
	inst.cn98xx.ggrp = 0;
	inst.cn98xx.tt = 0;
	inst.cn98xx.tag = 0;
	/* W 6 */
	inst.cn98xx.ree_job_length = op->mbuf->data_len & 0x7FFF;
	if (op->req_flags & RTE_REGEX_OPS_REQ_STOP_ON_MATCH_F)
		inst.cn98xx.ree_job_ctrl = (0x2 << 8);
	else if (op->req_flags & RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F)
		inst.cn98xx.ree_job_ctrl = (0x1 << 8);
	else
		inst.cn98xx.ree_job_ctrl = 0;
	inst.cn98xx.ree_job_id = qp->roc_regexdev_jobid;
	/* W 7 */
	inst.cn98xx.ree_job_subset_id_0 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F)
		inst.cn98xx.ree_job_subset_id_1 = op->group_id1;
	else
		inst.cn98xx.ree_job_subset_id_1 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F)
		inst.cn98xx.ree_job_subset_id_2 = op->group_id2;
	else
		inst.cn98xx.ree_job_subset_id_2 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)
		inst.cn98xx.ree_job_subset_id_3 = op->group_id3;
	else
		inst.cn98xx.ree_job_subset_id_3 = op->group_id0;

	/* Copy REE command to Q */
	offset = qp->write_offset * sizeof(inst);
	memcpy((void *)(qp->iq_dma_addr + offset), &inst, sizeof(inst));

	pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)op;
	pend_q->rid_queue[pend_q->enq_tail].user_id = op->user_id;

	/* Mark result as not done */
	res = (union ree_res *)(op);
	res->s.done = 0;
	res->s.ree_err = 0;

	/* We will use soft queue length here to limit requests */
	REE_MOD_INC(pend_q->enq_tail, REE_DEFAULT_CMD_QLEN);
	pend_q->pending_count += 1;
	REE_MOD_INC(qp->roc_regexdev_jobid, 0xFFFFFF);
	REE_MOD_INC(qp->write_offset, REE_IQ_LEN);

	return 0;
}

static uint16_t
cn9k_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		       struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	struct roc_ree_pending_queue *pend_q;
	uint16_t nb_allowed, count = 0;
	struct rte_regex_ops *op;
	int ret;

	pend_q = &qp->pend_q;

	nb_allowed = REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
	if (nb_ops > nb_allowed)
		nb_ops = nb_allowed;

	for (count = 0; count < nb_ops; count++) {
		op = ops[count];
		ret = ree_enqueue(qp, op, pend_q);

		if (unlikely(ret))
			break;
	}

	/*
	 * Make sure all instructions are written before DOORBELL is activated
	 */
	rte_io_wmb();

	/* Update Doorbell */
	plt_write64(count, qp->base + REE_LF_DOORBELL);

	return count;
}
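
/*
 * For illustration, a minimal application-side enqueue loop over the
 * generic regexdev API, which dispatches to cn9k_ree_enqueue_burst()
 * once the device is configured. BURST, pkts[], dev_id and qp_id are
 * assumptions, not part of this driver:
 *
 *	struct rte_regex_ops *ops[BURST];
 *	uint16_t n, i;
 *
 *	for (i = 0; i < BURST; i++) {
 *		ops[i]->mbuf = pkts[i];        // payload to scan
 *		ops[i]->group_id0 = 0;         // subset/group to match
 *		ops[i]->req_flags = 0;
 *		ops[i]->user_id = i;           // returned on dequeue
 *	}
 *	n = rte_regexdev_enqueue_burst(dev_id, qp_id, ops, BURST);
 *	// n < BURST when the pending queue is full; retry the rest later
 */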

static inline void
ree_dequeue_post_process(struct rte_regex_ops *ops)
{
	uint8_t ree_res_mcnt, ree_res_dmcnt;
	struct ree_res_s_98 *res;
	uint16_t ree_res_status;
	uint64_t match;
	int off;

	res = (struct ree_res_s_98 *)ops;
	/* store res values on stack since ops and res
	 * are using the same memory
	 */
	ree_res_status = res->ree_res_status;
	ree_res_mcnt = res->ree_res_mcnt;
	ree_res_dmcnt = res->ree_res_dmcnt;
	ops->rsp_flags = 0;
	ops->nb_actual_matches = ree_res_dmcnt;
	ops->nb_matches = ree_res_mcnt;
	if (unlikely(res->ree_err)) {
		ops->nb_actual_matches = 0;
		ops->nb_matches = 0;
	}

	if (unlikely(ree_res_status != REE_TYPE_RESULT_DESC)) {
		if (ree_res_status & REE_STATUS_PMI_SOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
		if (ree_res_status & REE_STATUS_PMI_EOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
		if (ree_res_status & REE_STATUS_ML_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
		if (ree_res_status & REE_STATUS_MM_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
		if (ree_res_status & REE_STATUS_MP_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
	}
	if (ops->nb_matches > 0) {
		/* Move the matches to the correct offset */
		off = ((ops->nb_matches < REE_NUM_MATCHES_ALIGN) ?
			ops->nb_matches : REE_NUM_MATCHES_ALIGN);
		match = (uint64_t)ops + REE_MATCH_OFFSET;
		match += (ops->nb_matches - off) *
			sizeof(union ree_match);
		memcpy((void *)ops->matches, (void *)match,
			off * sizeof(union ree_match));
	}
}
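
/*
 * For illustration: if the HW reports nb_matches = 14, then
 * off = min(14, REE_NUM_MATCHES_ALIGN) = 11 and the memcpy() above moves
 * the last 11 match entries, starting at RES_PTR_ADDR + 0x80 + (14 - 11) *
 * 8 B, down into ops->matches[]. Entries of ops->matches[] past index 10
 * alias the head of the HW match area, since the two regions overlap 88 B
 * after the matches array begins.
 */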

static uint16_t
cn9k_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		       struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	struct roc_ree_pending_queue *pend_q;
	int i, nb_pending, nb_completed = 0;
	volatile struct ree_res_s_98 *res;
	struct roc_ree_rid *rid;

	pend_q = &qp->pend_q;

	nb_pending = pend_q->pending_count;

	if (nb_ops > nb_pending)
		nb_ops = nb_pending;

	for (i = 0; i < nb_ops; i++) {
		rid = &pend_q->rid_queue[pend_q->deq_head];
		res = (volatile struct ree_res_s_98 *)(rid->rid);

		/* Check response header done bit if completed */
		if (unlikely(!res->done))
			break;

		ops[i] = (struct rte_regex_ops *)(rid->rid);
		ops[i]->user_id = rid->user_id;

		REE_MOD_INC(pend_q->deq_head, REE_DEFAULT_CMD_QLEN);
		pend_q->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++)
		ree_dequeue_post_process(ops[i]);

	return nb_completed;
}
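
/*
 * For illustration, the matching application-side dequeue loop (again a
 * sketch over the generic API; dev_id, qp_id, BURST and the hypothetical
 * consumer process_match() are assumptions):
 *
 *	struct rte_regex_ops *ops[BURST];
 *	uint16_t n, i, j;
 *
 *	n = rte_regexdev_dequeue_burst(dev_id, qp_id, ops, BURST);
 *	for (i = 0; i < n; i++) {
 *		for (j = 0; j < ops[i]->nb_matches; j++) {
 *			// each match carries rule_id, group_id,
 *			// start_offset and len
 *			process_match(&ops[i]->matches[j]);
 *		}
 *	}
 */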

static int
cn9k_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;

	ree_func_trace();

	if (info == NULL)
		return -EINVAL;

	info->driver_name = dev->device->driver->name;
	info->dev = dev->device;

	info->max_queue_pairs = vf->max_queues;
	info->max_matches = vf->max_matches;
	info->max_payload_size = REE_MAX_PAYLOAD_SIZE;
	info->max_rules_per_group = data->max_rules_per_group;
	info->max_groups = data->max_groups;
	info->regexdev_capa = data->regexdev_capa;
	info->rule_flags = data->rule_flags;

	return 0;
}

static int
cn9k_ree_dev_config(struct rte_regexdev *dev,
		    const struct rte_regexdev_config *cfg)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	const struct ree_rule_db *rule_db;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	if (cfg->nb_queue_pairs > vf->max_queues) {
		cn9k_err("Invalid number of queue pairs requested");
		return -EINVAL;
	}

	if (cfg->nb_max_matches != vf->max_matches) {
		cn9k_err("Invalid number of max matches requested");
		return -EINVAL;
	}

	if (cfg->dev_cfg_flags != 0) {
		cn9k_err("Invalid device configuration flags requested");
		return -EINVAL;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		roc_ree_err_intr_unregister(vf);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = roc_ree_queues_detach(vf);
		if (ret) {
			cn9k_err("Could not detach REE queues");
			return ret;
		}
	}

	/* TEMP : should be in lib */
	if (data->queue_pairs == NULL) { /* first time configuration */
		data->queue_pairs = rte_zmalloc("regexdev->queue_pairs",
				sizeof(data->queue_pairs[0]) *
				cfg->nb_queue_pairs, RTE_CACHE_LINE_SIZE);

		if (data->queue_pairs == NULL) {
			data->nb_queue_pairs = 0;
			cn9k_err("Failed to get memory for qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}
	} else { /* re-configure */
		uint16_t old_nb_queues = data->nb_queue_pairs;
		void **qp;
		unsigned int i;

		qp = data->queue_pairs;

		for (i = cfg->nb_queue_pairs; i < old_nb_queues; i++) {
			ret = ree_queue_pair_release(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * cfg->nb_queue_pairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			cn9k_err("Failed to realloc qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}

		if (cfg->nb_queue_pairs > old_nb_queues) {
			uint16_t new_qs = cfg->nb_queue_pairs - old_nb_queues;
			memset(qp + old_nb_queues, 0, sizeof(qp[0]) * new_qs);
		}

		data->queue_pairs = qp;
	}
	data->nb_queue_pairs = cfg->nb_queue_pairs;

	/* Attach queues */
	cn9k_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
	ret = roc_ree_queues_attach(vf, cfg->nb_queue_pairs);
	if (ret) {
		cn9k_err("Could not attach queues");
		return -ENODEV;
	}

	ret = roc_ree_msix_offsets_get(vf);
	if (ret) {
		cn9k_err("Could not get MSI-X offsets");
		goto queues_detach;
	}

	if (cfg->rule_db && cfg->rule_db_len) {
		cn9k_ree_dbg("rule_db length %d", cfg->rule_db_len);
		rule_db = (const struct ree_rule_db *)cfg->rule_db;
		rule_db_len = rule_db->number_of_entries *
				sizeof(struct ree_rule_db_entry);
		cn9k_ree_dbg("rule_db number of entries %d",
				rule_db->number_of_entries);
		if (rule_db_len > cfg->rule_db_len) {
			cn9k_err("Rule db length is inconsistent with entry count");
			ret = -EINVAL;
			goto queues_detach;
		}
		ret = roc_ree_rule_db_prog(vf, (const char *)rule_db->entries,
				rule_db_len, NULL, REE_NON_INC_PROG);
		if (ret) {
			cn9k_err("Could not program rule db");
			goto queues_detach;
		}
	}

	dev->enqueue = cn9k_ree_enqueue_burst;
	dev->dequeue = cn9k_ree_dequeue_burst;

	rte_mb();
	return 0;

queues_detach:
	roc_ree_queues_detach(vf);
	return ret;
}
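
/*
 * For illustration, a minimal application-side bring-up sequence against
 * this PMD via the generic API. dev_id, NB_QPS and the optional rule-db
 * blob are assumptions, not defined by this file:
 *
 *	struct rte_regexdev_info info;
 *	struct rte_regexdev_config cfg = { 0 };
 *	struct rte_regexdev_qp_conf qp_conf = { 0 };
 *
 *	rte_regexdev_info_get(dev_id, &info);
 *	cfg.nb_queue_pairs = NB_QPS;
 *	cfg.nb_max_matches = info.max_matches;  // must match exactly, see
 *	                                        // cn9k_ree_dev_config()
 *	cfg.nb_rules_per_group = info.max_rules_per_group;
 *	cfg.nb_groups = info.max_groups;
 *	cfg.rule_db = rule_db_blob;             // optional pre-built db
 *	cfg.rule_db_len = rule_db_blob_len;
 *	rte_regexdev_configure(dev_id, &cfg);
 *
 *	qp_conf.nb_desc = REE_DEFAULT_CMD_QLEN; // upper bound enforced in
 *	                                        // queue pair setup below
 *	rte_regexdev_queue_pair_setup(dev_id, 0, &qp_conf);
 *	rte_regexdev_start(dev_id);
 */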

static int
cn9k_ree_stop(struct rte_regexdev *dev)
{
	RTE_SET_USED(dev);

	ree_func_trace();
	return 0;
}

static int
cn9k_ree_start(struct rte_regexdev *dev)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	uint32_t rule_db_len = 0;
	int ret;

	ree_func_trace();

	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, NULL);
	if (ret)
		return ret;
	if (rule_db_len == 0) {
		cn9k_err("Rule db not programmed");
		return -EFAULT;
	}

	return 0;
}

static int
cn9k_ree_close(struct rte_regexdev *dev)
{
	return ree_dev_fini(dev);
}

static int
cn9k_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
		const struct rte_regexdev_qp_conf *qp_conf)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp;

	ree_func_trace("Queue=%d", qp_id);

	if (data->queue_pairs[qp_id] != NULL)
		ree_queue_pair_release(dev, qp_id);

	if (qp_conf->nb_desc > REE_DEFAULT_CMD_QLEN) {
		cn9k_err("Could not setup queue pair for %u descriptors",
				qp_conf->nb_desc);
		return -EINVAL;
	}
	if (qp_conf->qp_conf_flags != 0) {
		cn9k_err("Could not setup queue pair with configuration flags 0x%x",
				qp_conf->qp_conf_flags);
		return -EINVAL;
	}

	qp = ree_qp_create(dev, qp_id);
	if (qp == NULL) {
		cn9k_err("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}
	data->queue_pairs[qp_id] = qp;

	return 0;
}

static int
cn9k_ree_rule_db_compile_activate(struct rte_regexdev *dev)
{
	return cn9k_ree_rule_db_compile_prog(dev);
}

static int
cn9k_ree_rule_db_update(struct rte_regexdev *dev,
		const struct rte_regexdev_rule *rules, uint16_t nb_rules)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct rte_regexdev_rule *old_ptr;
	uint32_t i, sum_nb_rules;

	ree_func_trace("nb_rules=%d", nb_rules);

	for (i = 0; i < nb_rules; i++) {
		if (rules[i].op == RTE_REGEX_RULE_OP_REMOVE)
			break;
		if (rules[i].group_id >= data->max_groups)
			break;
		if (rules[i].rule_id >= data->max_rules_per_group)
			break;
		/* Reject a rule that requests a flag the device does not
		 * support. For each bit, the logical implication
		 * (rule_flags -> supported rule_flags) must hold:
		 * p    q    p -> q
		 * 0    0      1
		 * 0    1      1
		 * 1    0      0
		 * 1    1      1
		 * The implication fails only when p = 1 and q = 0.
		 */
		if ((rules[i].rule_flags & ~(data->rule_flags)) != 0)
			break;
	}
	nb_rules = i;

	if (data->nb_rules == 0) {

		data->rules = rte_malloc("rte_regexdev_rules",
				nb_rules * sizeof(struct rte_regexdev_rule), 0);
		if (data->rules == NULL)
			return -ENOMEM;

		memcpy(data->rules, rules,
				nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = nb_rules;
	} else {

		old_ptr = data->rules;
		sum_nb_rules = data->nb_rules + nb_rules;
		data->rules = rte_realloc(data->rules,
				sum_nb_rules * sizeof(struct rte_regexdev_rule),
				0);
		if (data->rules == NULL) {
			data->rules = old_ptr;
			return -ENOMEM;
		}
		memcpy(&data->rules[data->nb_rules], rules,
				nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = sum_nb_rules;
	}
	return nb_rules;
}
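
/*
 * For illustration, adding one PCRE rule through the generic API, which
 * lands in cn9k_ree_rule_db_update() above and is compiled into the HW
 * rule database by rte_regexdev_rule_db_compile_activate(). dev_id and
 * the pattern are assumptions:
 *
 *	struct rte_regexdev_rule rule = {
 *		.op = RTE_REGEX_RULE_OP_ADD,
 *		.group_id = 0,
 *		.rule_id = 1,
 *		.pcre_rule = "abc[0-9]+",
 *		.pcre_rule_len = 9,
 *		.rule_flags = 0,
 *	};
 *
 *	rte_regexdev_rule_db_update(dev_id, &rule, 1);
 *	rte_regexdev_rule_db_compile_activate(dev_id);
 */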

static int
cn9k_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
		uint32_t rule_db_len)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	const struct ree_rule_db *ree_rule_db;
	uint32_t ree_rule_db_len;
	int ret;

	ree_func_trace("rule_db_len=%d", rule_db_len);

	ree_rule_db = (const struct ree_rule_db *)rule_db;
	ree_rule_db_len = ree_rule_db->number_of_entries *
			sizeof(struct ree_rule_db_entry);
	if (ree_rule_db_len > rule_db_len) {
		cn9k_err("Rule db length is inconsistent with entry count");
		return -EINVAL;
	}
	ret = roc_ree_rule_db_prog(vf, (const char *)ree_rule_db->entries,
			ree_rule_db_len, NULL, REE_NON_INC_PROG);
	if (ret) {
		cn9k_err("Could not program rule db");
		return -ENOSPC;
	}
	return 0;
}

static int
cn9k_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	struct ree_rule_db *ree_rule_db;
	uint32_t rule_dbi_len;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, &rule_dbi_len);
	if (ret)
		return ret;

	if (rule_db == NULL) {
		rule_db_len += sizeof(struct ree_rule_db);
		return rule_db_len;
	}

	ree_rule_db = (struct ree_rule_db *)rule_db;
	ret = roc_ree_rule_db_get(vf, (char *)ree_rule_db->entries,
			rule_db_len, NULL, 0);
	if (ret) {
		cn9k_err("Could not export rule db");
		return -EFAULT;
	}
	ree_rule_db->number_of_entries =
			rule_db_len / sizeof(struct ree_rule_db_entry);
	ree_rule_db->revision = REE_RULE_DB_REVISION;
	ree_rule_db->version = REE_RULE_DB_VERSION;

	return 0;
}
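
/*
 * For illustration, the export/import round trip implied above: calling
 * export with a NULL buffer returns the required size, a second call
 * fills the blob, and the blob can later be fed back through import
 * (dev_id and other_dev_id are assumptions):
 *
 *	int len = rte_regexdev_rule_db_export(dev_id, NULL);
 *	char *db = rte_malloc(NULL, len, 0);
 *
 *	rte_regexdev_rule_db_export(dev_id, db);
 *	rte_regexdev_rule_db_import(other_dev_id, db, len);
 *	rte_free(db);
 */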

static struct rte_regexdev_ops cn9k_ree_ops = {
	.dev_info_get = cn9k_ree_dev_info_get,
	.dev_configure = cn9k_ree_dev_config,
	.dev_qp_setup = cn9k_ree_queue_pair_setup,
	.dev_start = cn9k_ree_start,
	.dev_stop = cn9k_ree_stop,
	.dev_close = cn9k_ree_close,
	.dev_attr_get = NULL,
	.dev_attr_set = NULL,
	.dev_rule_db_update = cn9k_ree_rule_db_update,
	.dev_rule_db_compile_activate =
			cn9k_ree_rule_db_compile_activate,
	.dev_db_import = cn9k_ree_rule_db_import,
	.dev_db_export = cn9k_ree_rule_db_export,
	.dev_xstats_names_get = NULL,
	.dev_xstats_get = NULL,
	.dev_xstats_by_name_get = NULL,
	.dev_xstats_reset = NULL,
	.dev_selftest = NULL,
	.dev_dump = NULL,
};

static int
cn9k_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct cn9k_ree_data *data;
	struct rte_regexdev *dev;
	struct roc_ree_vf *vf;
	int ret;

	ret = roc_plt_init();
	if (ret < 0) {
		plt_err("Failed to initialize platform model");
		return ret;
	}

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = ree_dev_register(name);
	if (dev == NULL) {
		ret = -ENODEV;
		goto exit;
	}

	dev->dev_ops = &cn9k_ree_ops;
	dev->device = &pci_dev->device;

	/* Get private data space allocated */
	data = dev->data->dev_private;
	vf = &data->vf;
	vf->pci_dev = pci_dev;
	ret = roc_ree_dev_init(vf);
	if (ret) {
		plt_err("Failed to initialize roc ree rc=%d", ret);
		goto dev_unregister;
	}

	data->rule_flags = RTE_REGEX_PCRE_RULE_ALLOW_EMPTY_F |
			RTE_REGEX_PCRE_RULE_ANCHORED_F;
	data->regexdev_capa = 0;
	data->max_groups = REE_MAX_GROUPS;
	data->max_rules_per_group = REE_MAX_RULES_PER_GROUP;
	data->nb_rules = 0;

	dev->state = RTE_REGEXDEV_READY;
	return 0;

dev_unregister:
	ree_dev_unregister(dev);
exit:
	cn9k_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
		    pci_dev->id.vendor_id, pci_dev->id.device_id);
	return ret;
}

static int
cn9k_ree_pci_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct rte_regexdev *dev = NULL;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = rte_regexdev_get_device_by_name(name);

	if (dev == NULL)
		return -ENODEV;

	return ree_dev_fini(dev);
}

static struct rte_pci_id pci_id_ree_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVID_CNXK_RVU_REE_PF)
	},
	{
		.vendor_id = 0,
	}
};

static struct rte_pci_driver cn9k_regexdev_pmd = {
	.id_table = pci_id_ree_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = cn9k_ree_pci_probe,
	.remove = cn9k_ree_pci_remove,
};


RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_CN9K_PMD, cn9k_regexdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_CN9K_PMD, pci_id_ree_table);