/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>

/* REE common headers */
#include "cn9k_regexdev.h"

/* HW matches are written at offset 0x80 from RES_PTR_ADDR.
 * In the op structure, matches start at W5 (0x28).
 * The trailing matches must therefore be copied from 0x80 down to 0x28.
 * That window is 88 B and each match takes 8 B, so up to 11 matches can
 * be copied.
 */
#define REE_NUM_MATCHES_ALIGN	11
/* The REE co-processor will write up to 254 job match structures
 * (REE_MATCH_S) starting at address [RES_PTR_ADDR] + 0x80.
 */
#define REE_MATCH_OFFSET	0x80
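
/* Illustrative example (added for clarity, not from the original source):
 * a result with 4 matches fits entirely in the 88 B window, so all 4 are
 * copied from RES_PTR_ADDR + 0x80 down to the op; a result with 20 matches
 * only gets its REE_NUM_MATCHES_ALIGN (11) trailing matches copied,
 * starting at RES_PTR_ADDR + 0x80 + (20 - 11) * 8.
 * See ree_dequeue_post_process().
 */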

#define REE_MAX_RULES_PER_GROUP 0xFFFF
#define REE_MAX_GROUPS 0xFFFF

#define REE_RULE_DB_VERSION	2
#define REE_RULE_DB_REVISION	0

struct ree_rule_db_entry {
	uint8_t		type;
	uint32_t	addr;
	uint64_t	value;
};

struct __rte_packed_begin ree_rule_db {
	uint32_t version;
	uint32_t revision;
	uint32_t number_of_entries;
	struct ree_rule_db_entry entries[];
} __rte_packed_end;
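
/* Illustrative note (added for clarity): a serialized rule DB blob is this
 * packed header immediately followed by number_of_entries fixed-size
 * entries; cn9k_ree_dev_config() and cn9k_ree_rule_db_import() below
 * validate number_of_entries * sizeof(struct ree_rule_db_entry) against
 * the caller-supplied blob length before programming the hardware.
 */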

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "cn9k_ree_lf_mem_%u:%u", dev_id, qp_id);
}

static struct roc_ree_qp *
ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
	struct roc_ree_vf *vf = &data->vf;
	const struct rte_memzone *lf_mem;
	uint32_t len, iq_len, size_div2;
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t used_len, iova;
	struct roc_ree_qp *qp;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc("CN9K Regex PMD Queue Pair", sizeof(*qp),
				ROC_ALIGN);
	if (qp == NULL) {
		cn9k_err("Could not allocate queue pair");
		return NULL;
	}

	iq_len = REE_IQ_LEN;

	/*
	 * Queue size must be in units of 128 B (2 * REE_INST_S, where each
	 * REE_INST_S is 64 B), and a power of 2.
	 * The effective queue size available to software is (size - 1) * 128 B.
	 */
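	/* Worked example (illustrative, added for clarity): for iq_len
	 * instructions the queue occupies iq_len / 2 units of 128 B, so
	 * size_div2 below is that unit count and software may hold at most
	 * (size_div2 - 1) * 2 instructions in flight.
	 */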
	size_div2 = iq_len >> 1;

	/* For pending queue */
	len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += REE_IQ_LEN * sizeof(union roc_ree_inst);

	/* Waste after instruction queues */
	len = RTE_ALIGN(len, pg_sz);
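
	/* Resulting memzone layout (illustrative summary, added for clarity):
	 *
	 *   [rid ring for iq_len entries][pad to page size]
	 *   [instruction queue          ][pad to page size]
	 *
	 * The used_len arithmetic further below recomputes the rid-ring span
	 * to locate the instruction queue's IOVA.
	 */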

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp_id);

	lf_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
			RTE_CACHE_LINE_SIZE);
	if (lf_mem == NULL) {
		cn9k_err("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = lf_mem->addr;
	iova = lf_mem->iova;

	memset(va, 0, len);

	/* Initialize pending queue */
	qp->pend_q.rid_queue = (struct roc_ree_rid *)va;
	qp->pend_q.enq_tail = 0;
	qp->pend_q.deq_head = 0;
	qp->pend_q.pending_count = 0;

	used_len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
	used_len = RTE_ALIGN(used_len, pg_sz);
	iova += used_len;

	qp->iq_dma_addr = iova;
	qp->id = qp_id;
	qp->base = roc_ree_qp_get_base(vf, qp_id);
	qp->roc_regexdev_jobid = 0;
	qp->write_offset = 0;

	ret = roc_ree_iq_enable(vf, qp, REE_QUEUE_HI_PRIO, size_div2);
	if (ret) {
		cn9k_err("Could not enable instruction queue");
		goto qp_free;
	}

	return qp;

qp_free:
	rte_free(qp);
	return NULL;
}

static int
ree_qp_destroy(const struct rte_regexdev *dev, struct roc_ree_qp *qp)
{
	const struct rte_memzone *lf_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	roc_ree_iq_disable(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp->id);

	lf_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(lf_mem);
	if (ret)
		return ret;

	rte_free(qp);

	return 0;
}

static int
ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	int ret;

	ree_func_trace("Queue=%d", qp_id);

	if (qp == NULL)
		return -EINVAL;

	ret = ree_qp_destroy(dev, qp);
	if (ret) {
		cn9k_err("Could not destroy queue pair %d", qp_id);
		return ret;
	}

	data->queue_pairs[qp_id] = NULL;

	return 0;
}

static struct rte_regexdev *
ree_dev_register(const char *name)
{
	struct rte_regexdev *dev;

	cn9k_ree_dbg("Creating regexdev %s", name);

	/* allocate device structure */
	dev = rte_regexdev_register(name);
	if (dev == NULL) {
		cn9k_err("Failed to allocate regex device for %s", name);
		return NULL;
	}

	/* allocate private device structure */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dev->data->dev_private =
				rte_zmalloc_socket("regexdev device private",
						sizeof(struct cn9k_ree_data),
						RTE_CACHE_LINE_SIZE,
						rte_socket_id());

		if (dev->data->dev_private == NULL) {
			cn9k_err("Cannot allocate memory for dev %s private data",
					name);

			rte_regexdev_unregister(dev);
			return NULL;
		}
	}

	return dev;
}

static int
ree_dev_unregister(struct rte_regexdev *dev)
{
	cn9k_ree_dbg("Closing regex device %s", dev->device->name);

	/* free regex device */
	rte_regexdev_unregister(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(dev->data->dev_private);

	return 0;
}

static int
ree_dev_fini(struct rte_regexdev *dev)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	int i, ret;

	ree_func_trace();

	for (i = 0; i < data->nb_queue_pairs; i++) {
		ret = ree_queue_pair_release(dev, i);
		if (ret)
			return ret;
	}

	ret = roc_ree_queues_detach(vf);
	if (ret)
		cn9k_err("Could not detach queues");

	/* TEMP : should be in lib */
	rte_free(data->queue_pairs);
	rte_free(data->rules);

	roc_ree_dev_fini(vf);

	ret = ree_dev_unregister(dev);
	if (ret)
		cn9k_err("Could not destroy PMD");

	return ret;
}

static inline int
ree_enqueue(struct roc_ree_qp *qp, struct rte_regex_ops *op,
		 struct roc_ree_pending_queue *pend_q)
{
	union roc_ree_inst inst;
	union ree_res *res;
	uint32_t offset;

	if (unlikely(pend_q->pending_count >= REE_DEFAULT_CMD_QLEN)) {
		cn9k_err("Pending count %" PRIu64 " is greater than Q size %d",
				pend_q->pending_count, REE_DEFAULT_CMD_QLEN);
		return -EAGAIN;
	}
	if (unlikely(op->mbuf->data_len > REE_MAX_PAYLOAD_SIZE ||
			op->mbuf->data_len == 0)) {
		cn9k_err("Packet length %d is zero or greater than MAX payload %d",
				op->mbuf->data_len, REE_MAX_PAYLOAD_SIZE);
		return -EAGAIN;
	}

	/* W 0 */
	inst.cn98xx.ooj = 1;
	inst.cn98xx.dg = 0;
	inst.cn98xx.doneint = 0;
	/* W 1 */
	inst.cn98xx.inp_ptr_addr = rte_pktmbuf_mtod(op->mbuf, uint64_t);
	/* W 2 */
	inst.cn98xx.inp_ptr_ctl = op->mbuf->data_len & 0x7FFF;
	inst.cn98xx.inp_ptr_ctl = inst.cn98xx.inp_ptr_ctl << 32;

	/* W 3 */
	inst.cn98xx.res_ptr_addr = (uint64_t)op;
	/* W 4 */
	inst.cn98xx.wq_ptr = 0;
	/* W 5 */
	inst.cn98xx.ggrp = 0;
	inst.cn98xx.tt = 0;
	inst.cn98xx.tag = 0;
	/* W 6 */
	inst.cn98xx.ree_job_length = op->mbuf->data_len & 0x7FFF;
	if (op->req_flags & RTE_REGEX_OPS_REQ_STOP_ON_MATCH_F)
		inst.cn98xx.ree_job_ctrl = (0x2 << 8);
	else if (op->req_flags & RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F)
		inst.cn98xx.ree_job_ctrl = (0x1 << 8);
	else
		inst.cn98xx.ree_job_ctrl = 0;
	inst.cn98xx.ree_job_id = qp->roc_regexdev_jobid;
	/* W 7 */
	inst.cn98xx.ree_job_subset_id_0 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F)
		inst.cn98xx.ree_job_subset_id_1 = op->group_id1;
	else
		inst.cn98xx.ree_job_subset_id_1 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F)
		inst.cn98xx.ree_job_subset_id_2 = op->group_id2;
	else
		inst.cn98xx.ree_job_subset_id_2 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)
		inst.cn98xx.ree_job_subset_id_3 = op->group_id3;
	else
		inst.cn98xx.ree_job_subset_id_3 = op->group_id0;

	/* Copy REE command to Q */
	offset = qp->write_offset * sizeof(inst);
	memcpy((void *)(qp->iq_dma_addr + offset), &inst, sizeof(inst));

	pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)op;
	pend_q->rid_queue[pend_q->enq_tail].user_id = op->user_id;

	/* Mark result as not done */
	res = (union ree_res *)(op);
	res->s.done = 0;
	res->s.ree_err = 0;

	/* We will use soft queue length here to limit requests */
	REE_MOD_INC(pend_q->enq_tail, REE_DEFAULT_CMD_QLEN);
	pend_q->pending_count += 1;
	REE_MOD_INC(qp->roc_regexdev_jobid, 0xFFFFFF);
	REE_MOD_INC(qp->write_offset, REE_IQ_LEN);

	return 0;
}

static uint16_t
cn9k_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		       struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	struct roc_ree_pending_queue *pend_q;
	uint16_t nb_allowed, count = 0;
	struct rte_regex_ops *op;
	int ret;

	pend_q = &qp->pend_q;

	nb_allowed = REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
	if (nb_ops > nb_allowed)
		nb_ops = nb_allowed;

	for (count = 0; count < nb_ops; count++) {
		op = ops[count];
		ret = ree_enqueue(qp, op, pend_q);

		if (unlikely(ret))
			break;
	}

	/*
	 * Make sure all instructions are written before DOORBELL is activated
	 */
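	/* Illustrative note (added for clarity): the instruction queue is in
	 * normal memory while the doorbell is an MMIO register, so without
	 * this barrier the device could observe the doorbell write before
	 * the instruction words themselves are visible.
	 */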
	rte_io_wmb();

	/* Update Doorbell */
	plt_write64(count, qp->base + REE_LF_DOORBELL);

	return count;
}

static inline void
ree_dequeue_post_process(struct rte_regex_ops *ops)
{
	uint8_t ree_res_mcnt, ree_res_dmcnt;
	int off = REE_MATCH_OFFSET;
	struct ree_res_s_98 *res;
	uint16_t ree_res_status;
	uint64_t match;

	res = (struct ree_res_s_98 *)ops;
	/* store res values on stack since ops and res
	 * are using the same memory
	 */
	ree_res_status = res->ree_res_status;
	ree_res_mcnt = res->ree_res_mcnt;
	ree_res_dmcnt = res->ree_res_dmcnt;
	ops->rsp_flags = 0;
	ops->nb_actual_matches = ree_res_dmcnt;
	ops->nb_matches = ree_res_mcnt;
	if (unlikely(res->ree_err)) {
		ops->nb_actual_matches = 0;
		ops->nb_matches = 0;
	}

	if (unlikely(ree_res_status != REE_TYPE_RESULT_DESC)) {
		if (ree_res_status & REE_STATUS_PMI_SOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
		if (ree_res_status & REE_STATUS_PMI_EOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
		if (ree_res_status & REE_STATUS_ML_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
		if (ree_res_status & REE_STATUS_MM_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
		if (ree_res_status & REE_STATUS_MP_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
	}
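	/* Illustrative walk-through (added for clarity): with 4 matches,
	 * off = 4 and all of them are copied from ops + REE_MATCH_OFFSET;
	 * with 20 matches, off = REE_NUM_MATCHES_ALIGN (11) and the source
	 * pointer advances (20 - 11) matches past ops + REE_MATCH_OFFSET,
	 * so only the 11 trailing matches land in ops->matches.
	 */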
	if (ops->nb_matches > 0) {
		/* Move the matches to the correct offset */
		off = ((ops->nb_matches < REE_NUM_MATCHES_ALIGN) ?
			ops->nb_matches : REE_NUM_MATCHES_ALIGN);
		match = (uint64_t)ops + REE_MATCH_OFFSET;
		match += (ops->nb_matches - off) *
			sizeof(union ree_match);
		memcpy((void *)ops->matches, (void *)match,
			off * sizeof(union ree_match));
	}
}

static uint16_t
cn9k_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		       struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	struct roc_ree_pending_queue *pend_q;
	int i, nb_pending, nb_completed = 0;
	volatile struct ree_res_s_98 *res;
	struct roc_ree_rid *rid;

	pend_q = &qp->pend_q;

	nb_pending = pend_q->pending_count;

	if (nb_ops > nb_pending)
		nb_ops = nb_pending;

	for (i = 0; i < nb_ops; i++) {
		rid = &pend_q->rid_queue[pend_q->deq_head];
		res = (volatile struct ree_res_s_98 *)(rid->rid);

		/* Check response header done bit if completed */
		if (unlikely(!res->done))
			break;

		ops[i] = (struct rte_regex_ops *)(rid->rid);
		ops[i]->user_id = rid->user_id;

		REE_MOD_INC(pend_q->deq_head, REE_DEFAULT_CMD_QLEN);
		pend_q->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++)
		ree_dequeue_post_process(ops[i]);

	return nb_completed;
}

static int
cn9k_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;

	ree_func_trace();

	if (info == NULL)
		return -EINVAL;

	info->driver_name = dev->device->driver->name;
	info->dev = dev->device;

	info->max_queue_pairs = vf->max_queues;
	info->max_matches = vf->max_matches;
	info->max_payload_size = REE_MAX_PAYLOAD_SIZE;
	info->max_rules_per_group = data->max_rules_per_group;
	info->max_groups = data->max_groups;
	info->regexdev_capa = data->regexdev_capa;
	info->rule_flags = data->rule_flags;

	return 0;
}

static int
cn9k_ree_dev_config(struct rte_regexdev *dev,
		    const struct rte_regexdev_config *cfg)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	const struct ree_rule_db *rule_db;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	if (cfg->nb_queue_pairs > vf->max_queues) {
		cn9k_err("Invalid number of queue pairs requested");
		return -EINVAL;
	}

	if (cfg->nb_max_matches != vf->max_matches) {
		cn9k_err("Invalid number of max matches requested");
		return -EINVAL;
	}

	if (cfg->dev_cfg_flags != 0) {
		cn9k_err("Invalid device configuration flags requested");
		return -EINVAL;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		roc_ree_err_intr_unregister(vf);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = roc_ree_queues_detach(vf);
		if (ret) {
			cn9k_err("Could not detach REE queues");
			return ret;
		}
	}

	/* TEMP : should be in lib */
	if (data->queue_pairs == NULL) { /* first time configuration */
		data->queue_pairs = rte_zmalloc("regexdev->queue_pairs",
				sizeof(data->queue_pairs[0]) *
				cfg->nb_queue_pairs, RTE_CACHE_LINE_SIZE);

		if (data->queue_pairs == NULL) {
			data->nb_queue_pairs = 0;
			cn9k_err("Failed to get memory for qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}
	} else { /* re-configure */
		uint16_t old_nb_queues = data->nb_queue_pairs;
		void **qp;
		unsigned int i;

		qp = data->queue_pairs;

		for (i = cfg->nb_queue_pairs; i < old_nb_queues; i++) {
			ret = ree_queue_pair_release(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * cfg->nb_queue_pairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			cn9k_err("Failed to realloc qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}

		if (cfg->nb_queue_pairs > old_nb_queues) {
			uint16_t new_qs = cfg->nb_queue_pairs - old_nb_queues;
			memset(qp + old_nb_queues, 0, sizeof(qp[0]) * new_qs);
		}

		data->queue_pairs = qp;
	}
	data->nb_queue_pairs = cfg->nb_queue_pairs;

	/* Attach queues */
	cn9k_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
	ret = roc_ree_queues_attach(vf, cfg->nb_queue_pairs);
	if (ret) {
		cn9k_err("Could not attach queues");
		return -ENODEV;
	}

	ret = roc_ree_msix_offsets_get(vf);
	if (ret) {
		cn9k_err("Could not get MSI-X offsets");
		goto queues_detach;
	}

	if (cfg->rule_db && cfg->rule_db_len) {
		cn9k_ree_dbg("rule_db length %d", cfg->rule_db_len);
		rule_db = (const struct ree_rule_db *)cfg->rule_db;
		rule_db_len = rule_db->number_of_entries *
				sizeof(struct ree_rule_db_entry);
		cn9k_ree_dbg("rule_db number of entries %d",
				rule_db->number_of_entries);
		if (rule_db_len > cfg->rule_db_len) {
			cn9k_err("Could not program rule db");
			ret = -EINVAL;
			goto queues_detach;
		}
		ret = roc_ree_rule_db_prog(vf, (const char *)rule_db->entries,
				rule_db_len, NULL, REE_NON_INC_PROG);
		if (ret) {
			cn9k_err("Could not program rule db");
			goto queues_detach;
		}
	}

	dev->enqueue = cn9k_ree_enqueue_burst;
	dev->dequeue = cn9k_ree_dequeue_burst;

	rte_mb();
	return 0;

queues_detach:
	roc_ree_queues_detach(vf);
	return ret;
}

static int
cn9k_ree_stop(struct rte_regexdev *dev)
{
	RTE_SET_USED(dev);

	ree_func_trace();
	return 0;
}

static int
cn9k_ree_start(struct rte_regexdev *dev)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	uint32_t rule_db_len = 0;
	int ret;

	ree_func_trace();

	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, NULL);
	if (ret)
		return ret;
	if (rule_db_len == 0) {
		cn9k_err("Rule db not programmed");
		return -EFAULT;
	}

	return 0;
}

static int
cn9k_ree_close(struct rte_regexdev *dev)
{
	return ree_dev_fini(dev);
}

static int
cn9k_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
		const struct rte_regexdev_qp_conf *qp_conf)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp;

	ree_func_trace("Queue=%d", qp_id);

	if (data->queue_pairs[qp_id] != NULL)
		ree_queue_pair_release(dev, qp_id);

	if (qp_conf->nb_desc > REE_DEFAULT_CMD_QLEN) {
		cn9k_err("Could not setup queue pair for %u descriptors",
				qp_conf->nb_desc);
		return -EINVAL;
	}
	if (qp_conf->qp_conf_flags != 0) {
		cn9k_err("Could not setup queue pair with configuration flags 0x%x",
				qp_conf->qp_conf_flags);
		return -EINVAL;
	}

	qp = ree_qp_create(dev, qp_id);
	if (qp == NULL) {
		cn9k_err("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}
	data->queue_pairs[qp_id] = qp;

	return 0;
}

static int
cn9k_ree_rule_db_update(struct rte_regexdev *dev,
		const struct rte_regexdev_rule *rules, uint16_t nb_rules)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct rte_regexdev_rule *old_ptr;
	uint32_t i, sum_nb_rules;

	ree_func_trace("nb_rules=%d", nb_rules);

	for (i = 0; i < nb_rules; i++) {
		if (rules[i].op == RTE_REGEX_RULE_OP_REMOVE)
			break;
		if (rules[i].group_id >= data->max_groups)
			break;
		if (rules[i].rule_id >= data->max_rules_per_group)
			break;
		/* logical implication
		 * p    q    p -> q
		 * 0    0      1
		 * 0    1      1
		 * 1    0      0
		 * 1    1      1
		 */
		if ((~(rules[i].rule_flags) | data->rule_flags) == 0)
			break;
	}
	nb_rules = i;

	if (data->nb_rules == 0) {
		data->rules = rte_malloc("rte_regexdev_rules",
				nb_rules * sizeof(struct rte_regexdev_rule), 0);
		if (data->rules == NULL)
			return -ENOMEM;

		memcpy(data->rules, rules,
				nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = nb_rules;
	} else {
		old_ptr = data->rules;
		sum_nb_rules = data->nb_rules + nb_rules;
		data->rules = rte_realloc(data->rules,
				sum_nb_rules * sizeof(struct rte_regexdev_rule),
							0);
		if (data->rules == NULL) {
			data->rules = old_ptr;
			return -ENOMEM;
		}
		memcpy(&data->rules[data->nb_rules], rules,
				nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = sum_nb_rules;
	}
	return nb_rules;
}

static int
cn9k_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
		uint32_t rule_db_len)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	const struct ree_rule_db *ree_rule_db;
	uint32_t ree_rule_db_len;
	int ret;

	ree_func_trace("rule_db_len=%d", rule_db_len);

	ree_rule_db = (const struct ree_rule_db *)rule_db;
	ree_rule_db_len = ree_rule_db->number_of_entries *
			sizeof(struct ree_rule_db_entry);
	if (ree_rule_db_len > rule_db_len) {
		cn9k_err("Could not program rule db");
		return -EINVAL;
	}
	ret = roc_ree_rule_db_prog(vf, (const char *)ree_rule_db->entries,
			ree_rule_db_len, NULL, REE_NON_INC_PROG);
	if (ret) {
		cn9k_err("Could not program rule db");
		return -ENOSPC;
	}
	return 0;
}

static int
cn9k_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	struct ree_rule_db *ree_rule_db;
	uint32_t rule_dbi_len;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, &rule_dbi_len);
	if (ret)
		return ret;

	if (rule_db == NULL) {
		rule_db_len += sizeof(struct ree_rule_db);
		return rule_db_len;
	}

	ree_rule_db = (struct ree_rule_db *)rule_db;
	ret = roc_ree_rule_db_get(vf, (char *)ree_rule_db->entries,
			rule_db_len, NULL, 0);
	if (ret) {
		cn9k_err("Could not export rule db");
		return -EFAULT;
	}
	ree_rule_db->number_of_entries =
			rule_db_len / sizeof(struct ree_rule_db_entry);
	ree_rule_db->revision = REE_RULE_DB_REVISION;
	ree_rule_db->version = REE_RULE_DB_VERSION;

	return 0;
}

static struct rte_regexdev_ops cn9k_ree_ops = {
	.dev_info_get = cn9k_ree_dev_info_get,
	.dev_configure = cn9k_ree_dev_config,
	.dev_qp_setup = cn9k_ree_queue_pair_setup,
	.dev_start = cn9k_ree_start,
	.dev_stop = cn9k_ree_stop,
	.dev_close = cn9k_ree_close,
	.dev_attr_get = NULL,
	.dev_attr_set = NULL,
	.dev_rule_db_update = cn9k_ree_rule_db_update,
	.dev_rule_db_compile_activate = NULL,
	.dev_db_import = cn9k_ree_rule_db_import,
	.dev_db_export = cn9k_ree_rule_db_export,
	.dev_xstats_names_get = NULL,
	.dev_xstats_get = NULL,
	.dev_xstats_by_name_get = NULL,
	.dev_xstats_reset = NULL,
	.dev_selftest = NULL,
	.dev_dump = NULL,
};

static int
cn9k_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct cn9k_ree_data *data;
	struct rte_regexdev *dev;
	struct roc_ree_vf *vf;
	int ret;

	ret = roc_plt_init();
	if (ret < 0) {
		plt_err("Failed to initialize platform model");
		return ret;
	}

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = ree_dev_register(name);
	if (dev == NULL) {
		ret = -ENODEV;
		goto exit;
	}

	dev->dev_ops = &cn9k_ree_ops;
	dev->device = &pci_dev->device;

	/* Get private data space allocated */
	data = dev->data->dev_private;
	vf = &data->vf;
	vf->pci_dev = pci_dev;
	ret = roc_ree_dev_init(vf);
	if (ret) {
		plt_err("Failed to initialize roc ree rc=%d", ret);
		goto dev_unregister;
	}

	data->rule_flags = RTE_REGEX_PCRE_RULE_ALLOW_EMPTY_F |
			RTE_REGEX_PCRE_RULE_ANCHORED_F;
	data->regexdev_capa = 0;
	data->max_groups = REE_MAX_GROUPS;
	data->max_rules_per_group = REE_MAX_RULES_PER_GROUP;
	data->nb_rules = 0;

	dev->state = RTE_REGEXDEV_READY;
	return 0;

dev_unregister:
	ree_dev_unregister(dev);
exit:
	cn9k_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
		    pci_dev->id.vendor_id, pci_dev->id.device_id);
	return ret;
}

static int
cn9k_ree_pci_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct rte_regexdev *dev = NULL;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = rte_regexdev_get_device_by_name(name);

	if (dev == NULL)
		return -ENODEV;

	return ree_dev_fini(dev);
}

static struct rte_pci_id pci_id_ree_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVID_CNXK_RVU_REE_PF)
	},
	{
		.vendor_id = 0,
	}
};

static struct rte_pci_driver cn9k_regexdev_pmd = {
	.id_table = pci_id_ree_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = cn9k_ree_pci_probe,
	.remove = cn9k_ree_pci_remove,
};

RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_CN9K_PMD, cn9k_regexdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_CN9K_PMD, pci_id_ree_table);
935