xref: /dpdk/drivers/regex/cn9k/cn9k_regexdev.c (revision e77506397fc8005c5129e22e9e2d15d5876790fd)
172c00ae9SLiron Himi /* SPDX-License-Identifier: BSD-3-Clause
272c00ae9SLiron Himi  * Copyright (C) 2020 Marvell International Ltd.
372c00ae9SLiron Himi  */
472c00ae9SLiron Himi 
572c00ae9SLiron Himi #include <stdio.h>
672c00ae9SLiron Himi #include <unistd.h>
772c00ae9SLiron Himi 
872c00ae9SLiron Himi #include <rte_malloc.h>
972c00ae9SLiron Himi #include <rte_memzone.h>
1072c00ae9SLiron Himi #include <rte_regexdev.h>
1172c00ae9SLiron Himi #include <rte_regexdev_core.h>
1272c00ae9SLiron Himi #include <rte_regexdev_driver.h>
1372c00ae9SLiron Himi 
1472c00ae9SLiron Himi 
1572c00ae9SLiron Himi /* REE common headers */
1672c00ae9SLiron Himi #include "cn9k_regexdev.h"
1772c00ae9SLiron Himi 
1872c00ae9SLiron Himi 
/* HW matches are written at offset 0x80 from RES_PTR_ADDR.
 * In the op structure, matches start at W5 (offset 0x28).
 * The matches at the tail therefore need to be copied from offset 0x80
 * down to offset 0x28, a span of 88 B. Each match is 8 B, so up to
 * 11 matches can be copied this way.
 */
2472c00ae9SLiron Himi #define REE_NUM_MATCHES_ALIGN	11
2572c00ae9SLiron Himi /* The REE co-processor will write up to 254 job match structures
2672c00ae9SLiron Himi  * (REE_MATCH_S) starting at address [RES_PTR_ADDR] + 0x80.
2772c00ae9SLiron Himi  */
2872c00ae9SLiron Himi #define REE_MATCH_OFFSET	0x80
2972c00ae9SLiron Himi 
3072c00ae9SLiron Himi #define REE_MAX_RULES_PER_GROUP 0xFFFF
3172c00ae9SLiron Himi #define REE_MAX_GROUPS 0xFFFF
3272c00ae9SLiron Himi 
3372c00ae9SLiron Himi 
3472c00ae9SLiron Himi #define REE_RULE_DB_VERSION	2
3572c00ae9SLiron Himi #define REE_RULE_DB_REVISION	0
3672c00ae9SLiron Himi 
/* One instruction of a pre-compiled rule database image. */
struct ree_rule_db_entry {
	uint8_t		type;	/* entry kind discriminator */
	uint32_t	addr;	/* target address for this entry */
	uint64_t	value;	/* payload written/associated with addr */
};
4272c00ae9SLiron Himi 
/* On-the-wire rule database image as passed via rte_regexdev_config.rule_db.
 * Packed: the layout must match the external producer of the image exactly.
 */
struct __rte_packed_begin ree_rule_db {
	uint32_t version;		/* expected REE_RULE_DB_VERSION */
	uint32_t revision;		/* expected REE_RULE_DB_REVISION */
	uint32_t number_of_entries;	/* count of trailing entries[] */
	struct ree_rule_db_entry entries[];	/* flexible array of DB entries */
} __rte_packed_end;
4972c00ae9SLiron Himi 
/* Compose the memzone identifier used for a queue pair's LF memory. */
static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	(void)snprintf(name, size, "cn9k_ree_lf_mem_%u:%u", dev_id, qp_id);
}
5572c00ae9SLiron Himi 
5672c00ae9SLiron Himi static struct roc_ree_qp *
5772c00ae9SLiron Himi ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
5872c00ae9SLiron Himi {
5972c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
6072c00ae9SLiron Himi 	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
6172c00ae9SLiron Himi 	struct roc_ree_vf *vf = &data->vf;
6272c00ae9SLiron Himi 	const struct rte_memzone *lf_mem;
6372c00ae9SLiron Himi 	uint32_t len, iq_len, size_div2;
6472c00ae9SLiron Himi 	char name[RTE_MEMZONE_NAMESIZE];
6572c00ae9SLiron Himi 	uint64_t used_len, iova;
6672c00ae9SLiron Himi 	struct roc_ree_qp *qp;
6772c00ae9SLiron Himi 	uint8_t *va;
6872c00ae9SLiron Himi 	int ret;
6972c00ae9SLiron Himi 
7072c00ae9SLiron Himi 	/* Allocate queue pair */
7172c00ae9SLiron Himi 	qp = rte_zmalloc("CN9K Regex PMD Queue Pair", sizeof(*qp),
7272c00ae9SLiron Himi 				ROC_ALIGN);
7372c00ae9SLiron Himi 	if (qp == NULL) {
7472c00ae9SLiron Himi 		cn9k_err("Could not allocate queue pair");
7572c00ae9SLiron Himi 		return NULL;
7672c00ae9SLiron Himi 	}
7772c00ae9SLiron Himi 
7872c00ae9SLiron Himi 	iq_len = REE_IQ_LEN;
7972c00ae9SLiron Himi 
8072c00ae9SLiron Himi 	/*
8172c00ae9SLiron Himi 	 * Queue size must be in units of 128B 2 * REE_INST_S (which is 64B),
8272c00ae9SLiron Himi 	 * and a power of 2.
8372c00ae9SLiron Himi 	 * effective queue size to software is (size - 1) * 128
8472c00ae9SLiron Himi 	 */
8572c00ae9SLiron Himi 	size_div2 = iq_len >> 1;
8672c00ae9SLiron Himi 
8772c00ae9SLiron Himi 	/* For pending queue */
8872c00ae9SLiron Himi 	len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
8972c00ae9SLiron Himi 
9072c00ae9SLiron Himi 	/* So that instruction queues start as pg size aligned */
9172c00ae9SLiron Himi 	len = RTE_ALIGN(len, pg_sz);
9272c00ae9SLiron Himi 
9372c00ae9SLiron Himi 	/* For instruction queues */
9472c00ae9SLiron Himi 	len += REE_IQ_LEN * sizeof(union roc_ree_inst);
9572c00ae9SLiron Himi 
9672c00ae9SLiron Himi 	/* Waste after instruction queues */
9772c00ae9SLiron Himi 	len = RTE_ALIGN(len, pg_sz);
9872c00ae9SLiron Himi 
9972c00ae9SLiron Himi 	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
10072c00ae9SLiron Himi 			    qp_id);
10172c00ae9SLiron Himi 
10272c00ae9SLiron Himi 	lf_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
10372c00ae9SLiron Himi 			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
10472c00ae9SLiron Himi 			RTE_CACHE_LINE_SIZE);
10572c00ae9SLiron Himi 	if (lf_mem == NULL) {
10672c00ae9SLiron Himi 		cn9k_err("Could not allocate reserved memzone");
10772c00ae9SLiron Himi 		goto qp_free;
10872c00ae9SLiron Himi 	}
10972c00ae9SLiron Himi 
11072c00ae9SLiron Himi 	va = lf_mem->addr;
11172c00ae9SLiron Himi 	iova = lf_mem->iova;
11272c00ae9SLiron Himi 
11372c00ae9SLiron Himi 	memset(va, 0, len);
11472c00ae9SLiron Himi 
11572c00ae9SLiron Himi 	/* Initialize pending queue */
11672c00ae9SLiron Himi 	qp->pend_q.rid_queue = (struct roc_ree_rid *)va;
11772c00ae9SLiron Himi 	qp->pend_q.enq_tail = 0;
11872c00ae9SLiron Himi 	qp->pend_q.deq_head = 0;
11972c00ae9SLiron Himi 	qp->pend_q.pending_count = 0;
12072c00ae9SLiron Himi 
12172c00ae9SLiron Himi 	used_len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
12272c00ae9SLiron Himi 	used_len = RTE_ALIGN(used_len, pg_sz);
12372c00ae9SLiron Himi 	iova += used_len;
12472c00ae9SLiron Himi 
12572c00ae9SLiron Himi 	qp->iq_dma_addr = iova;
12672c00ae9SLiron Himi 	qp->id = qp_id;
12772c00ae9SLiron Himi 	qp->base = roc_ree_qp_get_base(vf, qp_id);
12872c00ae9SLiron Himi 	qp->roc_regexdev_jobid = 0;
12972c00ae9SLiron Himi 	qp->write_offset = 0;
13072c00ae9SLiron Himi 
13172c00ae9SLiron Himi 	ret = roc_ree_iq_enable(vf, qp, REE_QUEUE_HI_PRIO, size_div2);
13272c00ae9SLiron Himi 	if (ret) {
13372c00ae9SLiron Himi 		cn9k_err("Could not enable instruction queue");
13472c00ae9SLiron Himi 		goto qp_free;
13572c00ae9SLiron Himi 	}
13672c00ae9SLiron Himi 
13772c00ae9SLiron Himi 	return qp;
13872c00ae9SLiron Himi 
13972c00ae9SLiron Himi qp_free:
14072c00ae9SLiron Himi 	rte_free(qp);
14172c00ae9SLiron Himi 	return NULL;
14272c00ae9SLiron Himi }
14372c00ae9SLiron Himi 
14472c00ae9SLiron Himi static int
14572c00ae9SLiron Himi ree_qp_destroy(const struct rte_regexdev *dev, struct roc_ree_qp *qp)
14672c00ae9SLiron Himi {
14772c00ae9SLiron Himi 	const struct rte_memzone *lf_mem;
14872c00ae9SLiron Himi 	char name[RTE_MEMZONE_NAMESIZE];
14972c00ae9SLiron Himi 	int ret;
15072c00ae9SLiron Himi 
15172c00ae9SLiron Himi 	roc_ree_iq_disable(qp);
15272c00ae9SLiron Himi 
15372c00ae9SLiron Himi 	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
15472c00ae9SLiron Himi 			    qp->id);
15572c00ae9SLiron Himi 
15672c00ae9SLiron Himi 	lf_mem = rte_memzone_lookup(name);
15772c00ae9SLiron Himi 
15872c00ae9SLiron Himi 	ret = rte_memzone_free(lf_mem);
15972c00ae9SLiron Himi 	if (ret)
16072c00ae9SLiron Himi 		return ret;
16172c00ae9SLiron Himi 
16272c00ae9SLiron Himi 	rte_free(qp);
16372c00ae9SLiron Himi 
16472c00ae9SLiron Himi 	return 0;
16572c00ae9SLiron Himi }
16672c00ae9SLiron Himi 
16772c00ae9SLiron Himi static int
16872c00ae9SLiron Himi ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
16972c00ae9SLiron Himi {
17072c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
17172c00ae9SLiron Himi 	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
17272c00ae9SLiron Himi 	int ret;
17372c00ae9SLiron Himi 
17472c00ae9SLiron Himi 	ree_func_trace("Queue=%d", qp_id);
17572c00ae9SLiron Himi 
17672c00ae9SLiron Himi 	if (qp == NULL)
17772c00ae9SLiron Himi 		return -EINVAL;
17872c00ae9SLiron Himi 
17972c00ae9SLiron Himi 	ret = ree_qp_destroy(dev, qp);
18072c00ae9SLiron Himi 	if (ret) {
18172c00ae9SLiron Himi 		cn9k_err("Could not destroy queue pair %d", qp_id);
18272c00ae9SLiron Himi 		return ret;
18372c00ae9SLiron Himi 	}
18472c00ae9SLiron Himi 
18572c00ae9SLiron Himi 	data->queue_pairs[qp_id] = NULL;
18672c00ae9SLiron Himi 
18772c00ae9SLiron Himi 	return 0;
18872c00ae9SLiron Himi }
18972c00ae9SLiron Himi 
19072c00ae9SLiron Himi static struct rte_regexdev *
19172c00ae9SLiron Himi ree_dev_register(const char *name)
19272c00ae9SLiron Himi {
19372c00ae9SLiron Himi 	struct rte_regexdev *dev;
19472c00ae9SLiron Himi 
195f665790aSDavid Marchand 	cn9k_ree_dbg("Creating regexdev %s", name);
19672c00ae9SLiron Himi 
19772c00ae9SLiron Himi 	/* allocate device structure */
19872c00ae9SLiron Himi 	dev = rte_regexdev_register(name);
19972c00ae9SLiron Himi 	if (dev == NULL) {
20072c00ae9SLiron Himi 		cn9k_err("Failed to allocate regex device for %s", name);
20172c00ae9SLiron Himi 		return NULL;
20272c00ae9SLiron Himi 	}
20372c00ae9SLiron Himi 
20472c00ae9SLiron Himi 	/* allocate private device structure */
20572c00ae9SLiron Himi 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
20672c00ae9SLiron Himi 		dev->data->dev_private =
20772c00ae9SLiron Himi 				rte_zmalloc_socket("regexdev device private",
20872c00ae9SLiron Himi 						sizeof(struct cn9k_ree_data),
20972c00ae9SLiron Himi 						RTE_CACHE_LINE_SIZE,
21072c00ae9SLiron Himi 						rte_socket_id());
21172c00ae9SLiron Himi 
21272c00ae9SLiron Himi 		if (dev->data->dev_private == NULL) {
21372c00ae9SLiron Himi 			cn9k_err("Cannot allocate memory for dev %s private data",
21472c00ae9SLiron Himi 					name);
21572c00ae9SLiron Himi 
21672c00ae9SLiron Himi 			rte_regexdev_unregister(dev);
21772c00ae9SLiron Himi 			return NULL;
21872c00ae9SLiron Himi 		}
21972c00ae9SLiron Himi 	}
22072c00ae9SLiron Himi 
22172c00ae9SLiron Himi 	return dev;
22272c00ae9SLiron Himi }
22372c00ae9SLiron Himi 
22472c00ae9SLiron Himi static int
22572c00ae9SLiron Himi ree_dev_unregister(struct rte_regexdev *dev)
22672c00ae9SLiron Himi {
22772c00ae9SLiron Himi 	cn9k_ree_dbg("Closing regex device %s", dev->device->name);
22872c00ae9SLiron Himi 
22972c00ae9SLiron Himi 	/* free regex device */
23072c00ae9SLiron Himi 	rte_regexdev_unregister(dev);
23172c00ae9SLiron Himi 
23272c00ae9SLiron Himi 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
23372c00ae9SLiron Himi 		rte_free(dev->data->dev_private);
23472c00ae9SLiron Himi 
23572c00ae9SLiron Himi 	return 0;
23672c00ae9SLiron Himi }
23772c00ae9SLiron Himi 
/*
 * Full device teardown: release all queue pairs, detach HW queues, free
 * PMD bookkeeping, finalize the ROC layer, then unregister the device.
 * Returns 0 on success or the first fatal error encountered.
 */
static int
ree_dev_fini(struct rte_regexdev *dev)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	int i, ret;

	ree_func_trace();

	/* Queue pairs must go before the queues they sit on are detached. */
	for (i = 0; i < data->nb_queue_pairs; i++) {
		ret = ree_queue_pair_release(dev, i);
		if (ret)
			return ret;
	}

	/* Detach failure is logged but does not abort the teardown. */
	ret = roc_ree_queues_detach(vf);
	if (ret)
		cn9k_err("Could not detach queues");

	/* TEMP : should be in lib */
	rte_free(data->queue_pairs);
	rte_free(data->rules);

	roc_ree_dev_fini(vf);

	ret = ree_dev_unregister(dev);
	if (ret)
		cn9k_err("Could not destroy PMD");

	return ret;
}
26972c00ae9SLiron Himi 
/*
 * Build one REE instruction for @op and append it to the qp's instruction
 * queue, recording the op in the pending queue for later dequeue.
 * Returns 0 on success, -EAGAIN if the soft queue is full or the mbuf
 * length is out of range. The doorbell is NOT rung here; the burst
 * caller does that once for the whole batch.
 */
static inline int
ree_enqueue(struct roc_ree_qp *qp, struct rte_regex_ops *op,
		 struct roc_ree_pending_queue *pend_q)
{
	union roc_ree_inst inst;
	union ree_res *res;
	uint32_t offset;

	if (unlikely(pend_q->pending_count >= REE_DEFAULT_CMD_QLEN)) {
		cn9k_err("Pending count %" PRIu64 " is greater than Q size %d",
		pend_q->pending_count, REE_DEFAULT_CMD_QLEN);
		return -EAGAIN;
	}
	/* NOTE(review): a zero-length mbuf also takes this branch, so the
	 * "greater than MAX payload" message can be misleading for that case.
	 */
	if (unlikely(op->mbuf->data_len > REE_MAX_PAYLOAD_SIZE ||
			op->mbuf->data_len == 0)) {
		cn9k_err("Packet length %d is greater than MAX payload %d",
				op->mbuf->data_len, REE_MAX_PAYLOAD_SIZE);
		return -EAGAIN;
	}

	/* W 0 */
	inst.cn98xx.ooj = 1;
	inst.cn98xx.dg = 0;
	inst.cn98xx.doneint = 0;
	/* W 1: input buffer = start of mbuf data */
	inst.cn98xx.inp_ptr_addr = rte_pktmbuf_mtod(op->mbuf, uint64_t);
	/* W 2: 15-bit length, shifted into the control word's upper half */
	inst.cn98xx.inp_ptr_ctl = op->mbuf->data_len & 0x7FFF;
	inst.cn98xx.inp_ptr_ctl = inst.cn98xx.inp_ptr_ctl << 32;

	/* W 3: results are written directly over the op structure */
	inst.cn98xx.res_ptr_addr = (uint64_t)op;
	/* W 4 */
	inst.cn98xx.wq_ptr = 0;
	/* W 5 */
	inst.cn98xx.ggrp = 0;
	inst.cn98xx.tt = 0;
	inst.cn98xx.tag = 0;
	/* W 6: job length, control flags from req_flags, rolling job id */
	inst.cn98xx.ree_job_length = op->mbuf->data_len & 0x7FFF;
	if (op->req_flags & RTE_REGEX_OPS_REQ_STOP_ON_MATCH_F)
		inst.cn98xx.ree_job_ctrl = (0x2 << 8);
	else if (op->req_flags & RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F)
		inst.cn98xx.ree_job_ctrl = (0x1 << 8);
	else
		inst.cn98xx.ree_job_ctrl = 0;
	inst.cn98xx.ree_job_id = qp->roc_regexdev_jobid;
	/* W 7: subset ids default to group_id0 unless the VALID flag names
	 * a specific group for that slot.
	 */
	inst.cn98xx.ree_job_subset_id_0 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F)
		inst.cn98xx.ree_job_subset_id_1 = op->group_id1;
	else
		inst.cn98xx.ree_job_subset_id_1 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F)
		inst.cn98xx.ree_job_subset_id_2 = op->group_id2;
	else
		inst.cn98xx.ree_job_subset_id_2 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)
		inst.cn98xx.ree_job_subset_id_3 = op->group_id3;
	else
		inst.cn98xx.ree_job_subset_id_3 = op->group_id0;

	/* Copy REE command to Q
	 * NOTE(review): iq_dma_addr is used as a CPU pointer here, which
	 * presumes IOVA-as-VA mode — confirm against platform assumptions.
	 */
	offset = qp->write_offset * sizeof(inst);
	memcpy((void *)(qp->iq_dma_addr + offset), &inst, sizeof(inst));

	pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)op;
	pend_q->rid_queue[pend_q->enq_tail].user_id = op->user_id;

	/* Mark result as not done; HW overwrites the op memory with the
	 * result (res_ptr_addr above points at op).
	 */
	res = (union ree_res *)(op);
	res->s.done = 0;
	res->s.ree_err = 0;

	/* We will use soft queue length here to limit requests */
	REE_MOD_INC(pend_q->enq_tail, REE_DEFAULT_CMD_QLEN);
	pend_q->pending_count += 1;
	REE_MOD_INC(qp->roc_regexdev_jobid, 0xFFFFFF);
	REE_MOD_INC(qp->write_offset, REE_IQ_LEN);

	return 0;
}
35272c00ae9SLiron Himi 
35372c00ae9SLiron Himi static uint16_t
35472c00ae9SLiron Himi cn9k_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
35572c00ae9SLiron Himi 		       struct rte_regex_ops **ops, uint16_t nb_ops)
35672c00ae9SLiron Himi {
35772c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
35872c00ae9SLiron Himi 	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
35972c00ae9SLiron Himi 	struct roc_ree_pending_queue *pend_q;
36072c00ae9SLiron Himi 	uint16_t nb_allowed, count = 0;
36172c00ae9SLiron Himi 	struct rte_regex_ops *op;
36272c00ae9SLiron Himi 	int ret;
36372c00ae9SLiron Himi 
36472c00ae9SLiron Himi 	pend_q = &qp->pend_q;
36572c00ae9SLiron Himi 
36672c00ae9SLiron Himi 	nb_allowed = REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
36772c00ae9SLiron Himi 	if (nb_ops > nb_allowed)
36872c00ae9SLiron Himi 		nb_ops = nb_allowed;
36972c00ae9SLiron Himi 
37072c00ae9SLiron Himi 	for (count = 0; count < nb_ops; count++) {
37172c00ae9SLiron Himi 		op = ops[count];
37272c00ae9SLiron Himi 		ret = ree_enqueue(qp, op, pend_q);
37372c00ae9SLiron Himi 
37472c00ae9SLiron Himi 		if (unlikely(ret))
37572c00ae9SLiron Himi 			break;
37672c00ae9SLiron Himi 	}
37772c00ae9SLiron Himi 
37872c00ae9SLiron Himi 	/*
37972c00ae9SLiron Himi 	 * Make sure all instructions are written before DOORBELL is activated
38072c00ae9SLiron Himi 	 */
38172c00ae9SLiron Himi 	rte_io_wmb();
38272c00ae9SLiron Himi 
38372c00ae9SLiron Himi 	/* Update Doorbell */
38472c00ae9SLiron Himi 	plt_write64(count, qp->base + REE_LF_DOORBELL);
38572c00ae9SLiron Himi 
38672c00ae9SLiron Himi 	return count;
38772c00ae9SLiron Himi }
38872c00ae9SLiron Himi 
/*
 * Convert the raw HW result (written in place over the op memory) into
 * rte_regex_ops fields, and relocate the trailing matches from the HW
 * offset (0x80) to the op's matches array. See the REE_NUM_MATCHES_ALIGN
 * comment at the top of this file for the 11-match copy limit.
 */
static inline void
ree_dequeue_post_process(struct rte_regex_ops *ops)
{
	uint8_t ree_res_mcnt, ree_res_dmcnt;
	int off = REE_MATCH_OFFSET;
	struct ree_res_s_98 *res;
	uint16_t ree_res_status;
	uint64_t match;

	res = (struct ree_res_s_98 *)ops;
	/* store res values on stack since ops and res
	 * are using the same memory
	 */
	ree_res_status = res->ree_res_status;
	ree_res_mcnt = res->ree_res_mcnt;
	ree_res_dmcnt = res->ree_res_dmcnt;
	/* Writing ops fields below clobbers the aliased res fields. */
	ops->rsp_flags = 0;
	ops->nb_actual_matches = ree_res_dmcnt;
	ops->nb_matches = ree_res_mcnt;
	/* On a HW-reported error the match counts are not trustworthy. */
	if (unlikely(res->ree_err)) {
		ops->nb_actual_matches = 0;
		ops->nb_matches = 0;
	}

	/* Non-result descriptors carry status bits to surface as rsp flags. */
	if (unlikely(ree_res_status != REE_TYPE_RESULT_DESC)) {
		if (ree_res_status & REE_STATUS_PMI_SOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
		if (ree_res_status & REE_STATUS_PMI_EOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
		if (ree_res_status & REE_STATUS_ML_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
		if (ree_res_status & REE_STATUS_MM_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
		if (ree_res_status & REE_STATUS_MP_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
	}
	if (ops->nb_matches > 0) {
		/* Move the matches to the correct offset: copy the last
		 * `off` (<= 11) matches from the HW area at ops+0x80 into
		 * ops->matches.
		 */
		off = ((ops->nb_matches < REE_NUM_MATCHES_ALIGN) ?
			ops->nb_matches : REE_NUM_MATCHES_ALIGN);
		match = (uint64_t)ops + REE_MATCH_OFFSET;
		match += (ops->nb_matches - off) *
			sizeof(union ree_match);
		memcpy((void *)ops->matches, (void *)match,
			off * sizeof(union ree_match));
	}
}
43672c00ae9SLiron Himi 
43772c00ae9SLiron Himi static uint16_t
43872c00ae9SLiron Himi cn9k_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
43972c00ae9SLiron Himi 		       struct rte_regex_ops **ops, uint16_t nb_ops)
44072c00ae9SLiron Himi {
44172c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
44272c00ae9SLiron Himi 	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
44372c00ae9SLiron Himi 	struct roc_ree_pending_queue *pend_q;
44472c00ae9SLiron Himi 	int i, nb_pending, nb_completed = 0;
44572c00ae9SLiron Himi 	volatile struct ree_res_s_98 *res;
44672c00ae9SLiron Himi 	struct roc_ree_rid *rid;
44772c00ae9SLiron Himi 
44872c00ae9SLiron Himi 	pend_q = &qp->pend_q;
44972c00ae9SLiron Himi 
45072c00ae9SLiron Himi 	nb_pending = pend_q->pending_count;
45172c00ae9SLiron Himi 
45272c00ae9SLiron Himi 	if (nb_ops > nb_pending)
45372c00ae9SLiron Himi 		nb_ops = nb_pending;
45472c00ae9SLiron Himi 
45572c00ae9SLiron Himi 	for (i = 0; i < nb_ops; i++) {
45672c00ae9SLiron Himi 		rid = &pend_q->rid_queue[pend_q->deq_head];
45772c00ae9SLiron Himi 		res = (volatile struct ree_res_s_98 *)(rid->rid);
45872c00ae9SLiron Himi 
45972c00ae9SLiron Himi 		/* Check response header done bit if completed */
46072c00ae9SLiron Himi 		if (unlikely(!res->done))
46172c00ae9SLiron Himi 			break;
46272c00ae9SLiron Himi 
46372c00ae9SLiron Himi 		ops[i] = (struct rte_regex_ops *)(rid->rid);
46472c00ae9SLiron Himi 		ops[i]->user_id = rid->user_id;
46572c00ae9SLiron Himi 
46672c00ae9SLiron Himi 		REE_MOD_INC(pend_q->deq_head, REE_DEFAULT_CMD_QLEN);
46772c00ae9SLiron Himi 		pend_q->pending_count -= 1;
46872c00ae9SLiron Himi 	}
46972c00ae9SLiron Himi 
47072c00ae9SLiron Himi 	nb_completed = i;
47172c00ae9SLiron Himi 
47272c00ae9SLiron Himi 	for (i = 0; i < nb_completed; i++)
47372c00ae9SLiron Himi 		ree_dequeue_post_process(ops[i]);
47472c00ae9SLiron Himi 
47572c00ae9SLiron Himi 	return nb_completed;
47672c00ae9SLiron Himi }
47772c00ae9SLiron Himi 
47872c00ae9SLiron Himi static int
47972c00ae9SLiron Himi cn9k_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
48072c00ae9SLiron Himi {
48172c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
48272c00ae9SLiron Himi 	struct roc_ree_vf *vf = &data->vf;
48372c00ae9SLiron Himi 
48472c00ae9SLiron Himi 	ree_func_trace();
48572c00ae9SLiron Himi 
48672c00ae9SLiron Himi 	if (info == NULL)
48772c00ae9SLiron Himi 		return -EINVAL;
48872c00ae9SLiron Himi 
48972c00ae9SLiron Himi 	info->driver_name = dev->device->driver->name;
49072c00ae9SLiron Himi 	info->dev = dev->device;
49172c00ae9SLiron Himi 
49272c00ae9SLiron Himi 	info->max_queue_pairs = vf->max_queues;
49372c00ae9SLiron Himi 	info->max_matches = vf->max_matches;
49472c00ae9SLiron Himi 	info->max_payload_size = REE_MAX_PAYLOAD_SIZE;
49572c00ae9SLiron Himi 	info->max_rules_per_group = data->max_rules_per_group;
49672c00ae9SLiron Himi 	info->max_groups = data->max_groups;
49772c00ae9SLiron Himi 	info->regexdev_capa = data->regexdev_capa;
49872c00ae9SLiron Himi 	info->rule_flags = data->rule_flags;
49972c00ae9SLiron Himi 
50072c00ae9SLiron Himi 	return 0;
50172c00ae9SLiron Himi }
50272c00ae9SLiron Himi 
/*
 * Configure (or reconfigure) the device: validate @cfg, reset interrupt
 * registration and queue attachment, size the queue-pair table, attach the
 * requested number of HW queues, and optionally program a rule database
 * supplied in cfg->rule_db. Returns 0 on success or a negative errno;
 * on late failures the freshly attached queues are detached again.
 */
static int
cn9k_ree_dev_config(struct rte_regexdev *dev,
		    const struct rte_regexdev_config *cfg)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	const struct ree_rule_db *rule_db;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	if (cfg->nb_queue_pairs > vf->max_queues) {
		cn9k_err("Invalid number of queue pairs requested");
		return -EINVAL;
	}

	if (cfg->nb_max_matches != vf->max_matches) {
		cn9k_err("Invalid number of max matches requested");
		return -EINVAL;
	}

	if (cfg->dev_cfg_flags != 0) {
		cn9k_err("Invalid device configuration flags requested");
		return -EINVAL;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		roc_ree_err_intr_unregister(vf);

	/* Detach queues so they can be re-attached with the new count */
	if (vf->nb_queues) {
		ret = roc_ree_queues_detach(vf);
		if (ret) {
			cn9k_err("Could not detach REE queues");
			return ret;
		}
	}

	/* TEMP : should be in lib */
	if (data->queue_pairs == NULL) { /* first time configuration */
		data->queue_pairs = rte_zmalloc("regexdev->queue_pairs",
				sizeof(data->queue_pairs[0]) *
				cfg->nb_queue_pairs, RTE_CACHE_LINE_SIZE);

		if (data->queue_pairs == NULL) {
			data->nb_queue_pairs = 0;
			cn9k_err("Failed to get memory for qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}
	} else { /* re-configure */
		uint16_t old_nb_queues = data->nb_queue_pairs;
		void **qp;
		unsigned int i;

		qp = data->queue_pairs;

		/* Release pairs beyond the new count before shrinking */
		for (i = cfg->nb_queue_pairs; i < old_nb_queues; i++) {
			ret = ree_queue_pair_release(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * cfg->nb_queue_pairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			cn9k_err("Failed to realloc qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}

		/* Zero the newly grown tail of the table */
		if (cfg->nb_queue_pairs > old_nb_queues) {
			uint16_t new_qs = cfg->nb_queue_pairs - old_nb_queues;
			memset(qp + old_nb_queues, 0, sizeof(qp[0]) * new_qs);
		}

		data->queue_pairs = qp;
	}
	data->nb_queue_pairs = cfg->nb_queue_pairs;

	/* Attach queues */
	cn9k_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
	ret = roc_ree_queues_attach(vf, cfg->nb_queue_pairs);
	if (ret) {
		cn9k_err("Could not attach queues");
		return -ENODEV;
	}

	ret = roc_ree_msix_offsets_get(vf);
	if (ret) {
		cn9k_err("Could not get MSI-X offsets");
		goto queues_detach;
	}

	/* Optionally program a caller-supplied rule database image */
	if (cfg->rule_db && cfg->rule_db_len) {
		cn9k_ree_dbg("rule_db length %d", cfg->rule_db_len);
		rule_db = (const struct ree_rule_db *)cfg->rule_db;
		rule_db_len = rule_db->number_of_entries *
				sizeof(struct ree_rule_db_entry);
		cn9k_ree_dbg("rule_db number of entries %d",
				rule_db->number_of_entries);
		/* Entry count must fit within the supplied buffer */
		if (rule_db_len > cfg->rule_db_len) {
			cn9k_err("Could not program rule db");
			ret = -EINVAL;
			goto queues_detach;
		}
		ret = roc_ree_rule_db_prog(vf, (const char *)rule_db->entries,
				rule_db_len, NULL, REE_NON_INC_PROG);
		if (ret) {
			cn9k_err("Could not program rule db");
			goto queues_detach;
		}
	}

	dev->enqueue = cn9k_ree_enqueue_burst;
	dev->dequeue = cn9k_ree_dequeue_burst;

	/* Publish the fast-path pointers before returning */
	rte_mb();
	return 0;

queues_detach:
	roc_ree_queues_detach(vf);
	return ret;
}
62972c00ae9SLiron Himi 
/* Device stop: nothing to quiesce for this PMD; trace and succeed. */
static int
cn9k_ree_stop(struct rte_regexdev *dev)
{
	RTE_SET_USED(dev);
	ree_func_trace();

	return 0;
}
63872c00ae9SLiron Himi 
63972c00ae9SLiron Himi static int
64072c00ae9SLiron Himi cn9k_ree_start(struct rte_regexdev *dev)
64172c00ae9SLiron Himi {
64272c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
64372c00ae9SLiron Himi 	struct roc_ree_vf *vf = &data->vf;
64472c00ae9SLiron Himi 	uint32_t rule_db_len = 0;
64572c00ae9SLiron Himi 	int ret;
64672c00ae9SLiron Himi 
64772c00ae9SLiron Himi 	ree_func_trace();
64872c00ae9SLiron Himi 
64972c00ae9SLiron Himi 	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, NULL);
65072c00ae9SLiron Himi 	if (ret)
65172c00ae9SLiron Himi 		return ret;
65272c00ae9SLiron Himi 	if (rule_db_len == 0) {
65372c00ae9SLiron Himi 		cn9k_err("Rule db not programmed");
65472c00ae9SLiron Himi 		return -EFAULT;
65572c00ae9SLiron Himi 	}
65672c00ae9SLiron Himi 
65772c00ae9SLiron Himi 	return 0;
65872c00ae9SLiron Himi }
65972c00ae9SLiron Himi 
/* Device close: full teardown via ree_dev_fini(). */
static int
cn9k_ree_close(struct rte_regexdev *dev)
{
	return ree_dev_fini(dev);
}
66572c00ae9SLiron Himi 
66672c00ae9SLiron Himi static int
66772c00ae9SLiron Himi cn9k_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
66872c00ae9SLiron Himi 		const struct rte_regexdev_qp_conf *qp_conf)
66972c00ae9SLiron Himi {
67072c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
67172c00ae9SLiron Himi 	struct roc_ree_qp *qp;
67272c00ae9SLiron Himi 
67372c00ae9SLiron Himi 	ree_func_trace("Queue=%d", qp_id);
67472c00ae9SLiron Himi 
67572c00ae9SLiron Himi 	if (data->queue_pairs[qp_id] != NULL)
67672c00ae9SLiron Himi 		ree_queue_pair_release(dev, qp_id);
67772c00ae9SLiron Himi 
67872c00ae9SLiron Himi 	if (qp_conf->nb_desc > REE_DEFAULT_CMD_QLEN) {
67972c00ae9SLiron Himi 		cn9k_err("Could not setup queue pair for %u descriptors",
68072c00ae9SLiron Himi 				qp_conf->nb_desc);
68172c00ae9SLiron Himi 		return -EINVAL;
68272c00ae9SLiron Himi 	}
68372c00ae9SLiron Himi 	if (qp_conf->qp_conf_flags != 0) {
68472c00ae9SLiron Himi 		cn9k_err("Could not setup queue pair with configuration flags 0x%x",
68572c00ae9SLiron Himi 				qp_conf->qp_conf_flags);
68672c00ae9SLiron Himi 		return -EINVAL;
68772c00ae9SLiron Himi 	}
68872c00ae9SLiron Himi 
68972c00ae9SLiron Himi 	qp = ree_qp_create(dev, qp_id);
69072c00ae9SLiron Himi 	if (qp == NULL) {
69172c00ae9SLiron Himi 		cn9k_err("Could not create queue pair %d", qp_id);
69272c00ae9SLiron Himi 		return -ENOMEM;
69372c00ae9SLiron Himi 	}
69472c00ae9SLiron Himi 	data->queue_pairs[qp_id] = qp;
69572c00ae9SLiron Himi 
69672c00ae9SLiron Himi 	return 0;
69772c00ae9SLiron Himi }
69872c00ae9SLiron Himi 
69972c00ae9SLiron Himi static int
70072c00ae9SLiron Himi cn9k_ree_rule_db_update(struct rte_regexdev *dev,
70172c00ae9SLiron Himi 		const struct rte_regexdev_rule *rules, uint16_t nb_rules)
70272c00ae9SLiron Himi {
70372c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
70472c00ae9SLiron Himi 	struct rte_regexdev_rule *old_ptr;
70572c00ae9SLiron Himi 	uint32_t i, sum_nb_rules;
70672c00ae9SLiron Himi 
70772c00ae9SLiron Himi 	ree_func_trace("nb_rules=%d", nb_rules);
70872c00ae9SLiron Himi 
70972c00ae9SLiron Himi 	for (i = 0; i < nb_rules; i++) {
71072c00ae9SLiron Himi 		if (rules[i].op == RTE_REGEX_RULE_OP_REMOVE)
71172c00ae9SLiron Himi 			break;
71272c00ae9SLiron Himi 		if (rules[i].group_id >= data->max_groups)
71372c00ae9SLiron Himi 			break;
71472c00ae9SLiron Himi 		if (rules[i].rule_id >= data->max_rules_per_group)
71572c00ae9SLiron Himi 			break;
71672c00ae9SLiron Himi 		/* logical implication
71772c00ae9SLiron Himi 		 * p    q    p -> q
71872c00ae9SLiron Himi 		 * 0    0      1
71972c00ae9SLiron Himi 		 * 0    1      1
72072c00ae9SLiron Himi 		 * 1    0      0
72172c00ae9SLiron Himi 		 * 1    1      1
72272c00ae9SLiron Himi 		 */
72372c00ae9SLiron Himi 		if ((~(rules[i].rule_flags) | data->rule_flags) == 0)
72472c00ae9SLiron Himi 			break;
72572c00ae9SLiron Himi 	}
72672c00ae9SLiron Himi 	nb_rules = i;
72772c00ae9SLiron Himi 
72872c00ae9SLiron Himi 	if (data->nb_rules == 0) {
72972c00ae9SLiron Himi 
73072c00ae9SLiron Himi 		data->rules = rte_malloc("rte_regexdev_rules",
73172c00ae9SLiron Himi 				nb_rules*sizeof(struct rte_regexdev_rule), 0);
73272c00ae9SLiron Himi 		if (data->rules == NULL)
73372c00ae9SLiron Himi 			return -ENOMEM;
73472c00ae9SLiron Himi 
73572c00ae9SLiron Himi 		memcpy(data->rules, rules,
73672c00ae9SLiron Himi 				nb_rules*sizeof(struct rte_regexdev_rule));
73772c00ae9SLiron Himi 		data->nb_rules = nb_rules;
73872c00ae9SLiron Himi 	} else {
73972c00ae9SLiron Himi 
74072c00ae9SLiron Himi 		old_ptr = data->rules;
74172c00ae9SLiron Himi 		sum_nb_rules = data->nb_rules + nb_rules;
74272c00ae9SLiron Himi 		data->rules = rte_realloc(data->rules,
74372c00ae9SLiron Himi 				sum_nb_rules * sizeof(struct rte_regexdev_rule),
74472c00ae9SLiron Himi 							0);
74572c00ae9SLiron Himi 		if (data->rules == NULL) {
74672c00ae9SLiron Himi 			data->rules = old_ptr;
74772c00ae9SLiron Himi 			return -ENOMEM;
74872c00ae9SLiron Himi 		}
74972c00ae9SLiron Himi 		memcpy(&data->rules[data->nb_rules], rules,
75072c00ae9SLiron Himi 				nb_rules*sizeof(struct rte_regexdev_rule));
75172c00ae9SLiron Himi 		data->nb_rules = sum_nb_rules;
75272c00ae9SLiron Himi 	}
75372c00ae9SLiron Himi 	return nb_rules;
75472c00ae9SLiron Himi }
75572c00ae9SLiron Himi 
75672c00ae9SLiron Himi static int
75772c00ae9SLiron Himi cn9k_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
75872c00ae9SLiron Himi 		uint32_t rule_db_len)
75972c00ae9SLiron Himi {
76072c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
76172c00ae9SLiron Himi 	struct roc_ree_vf *vf = &data->vf;
76272c00ae9SLiron Himi 	const struct ree_rule_db *ree_rule_db;
76372c00ae9SLiron Himi 	uint32_t ree_rule_db_len;
76472c00ae9SLiron Himi 	int ret;
76572c00ae9SLiron Himi 
76672c00ae9SLiron Himi 	ree_func_trace("rule_db_len=%d", rule_db_len);
76772c00ae9SLiron Himi 
76872c00ae9SLiron Himi 	ree_rule_db = (const struct ree_rule_db *)rule_db;
76972c00ae9SLiron Himi 	ree_rule_db_len = ree_rule_db->number_of_entries *
77072c00ae9SLiron Himi 			sizeof(struct ree_rule_db_entry);
77172c00ae9SLiron Himi 	if (ree_rule_db_len > rule_db_len) {
77272c00ae9SLiron Himi 		cn9k_err("Could not program rule db");
77372c00ae9SLiron Himi 		return -EINVAL;
77472c00ae9SLiron Himi 	}
77572c00ae9SLiron Himi 	ret = roc_ree_rule_db_prog(vf, (const char *)ree_rule_db->entries,
77672c00ae9SLiron Himi 			ree_rule_db_len, NULL, REE_NON_INC_PROG);
77772c00ae9SLiron Himi 	if (ret) {
77872c00ae9SLiron Himi 		cn9k_err("Could not program rule db");
77972c00ae9SLiron Himi 		return -ENOSPC;
78072c00ae9SLiron Himi 	}
78172c00ae9SLiron Himi 	return 0;
78272c00ae9SLiron Himi }
78372c00ae9SLiron Himi 
78472c00ae9SLiron Himi static int
78572c00ae9SLiron Himi cn9k_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
78672c00ae9SLiron Himi {
78772c00ae9SLiron Himi 	struct cn9k_ree_data *data = dev->data->dev_private;
78872c00ae9SLiron Himi 	struct roc_ree_vf *vf = &data->vf;
78972c00ae9SLiron Himi 	struct ree_rule_db *ree_rule_db;
79072c00ae9SLiron Himi 	uint32_t rule_dbi_len;
79172c00ae9SLiron Himi 	uint32_t rule_db_len;
79272c00ae9SLiron Himi 	int ret;
79372c00ae9SLiron Himi 
79472c00ae9SLiron Himi 	ree_func_trace();
79572c00ae9SLiron Himi 
79672c00ae9SLiron Himi 	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, &rule_dbi_len);
79772c00ae9SLiron Himi 	if (ret)
79872c00ae9SLiron Himi 		return ret;
79972c00ae9SLiron Himi 
80072c00ae9SLiron Himi 	if (rule_db == NULL) {
80172c00ae9SLiron Himi 		rule_db_len += sizeof(struct ree_rule_db);
80272c00ae9SLiron Himi 		return rule_db_len;
80372c00ae9SLiron Himi 	}
80472c00ae9SLiron Himi 
80572c00ae9SLiron Himi 	ree_rule_db = (struct ree_rule_db *)rule_db;
80672c00ae9SLiron Himi 	ret = roc_ree_rule_db_get(vf, (char *)ree_rule_db->entries,
80772c00ae9SLiron Himi 			rule_db_len, NULL, 0);
80872c00ae9SLiron Himi 	if (ret) {
80972c00ae9SLiron Himi 		cn9k_err("Could not export rule db");
81072c00ae9SLiron Himi 		return -EFAULT;
81172c00ae9SLiron Himi 	}
81272c00ae9SLiron Himi 	ree_rule_db->number_of_entries =
81372c00ae9SLiron Himi 			rule_db_len/sizeof(struct ree_rule_db_entry);
81472c00ae9SLiron Himi 	ree_rule_db->revision = REE_RULE_DB_REVISION;
81572c00ae9SLiron Himi 	ree_rule_db->version = REE_RULE_DB_VERSION;
81672c00ae9SLiron Himi 
81772c00ae9SLiron Himi 	return 0;
81872c00ae9SLiron Himi }
81972c00ae9SLiron Himi 
82072c00ae9SLiron Himi static struct rte_regexdev_ops cn9k_ree_ops = {
82172c00ae9SLiron Himi 	.dev_info_get = cn9k_ree_dev_info_get,
82272c00ae9SLiron Himi 	.dev_configure = cn9k_ree_dev_config,
82372c00ae9SLiron Himi 	.dev_qp_setup = cn9k_ree_queue_pair_setup,
82472c00ae9SLiron Himi 	.dev_start = cn9k_ree_start,
82572c00ae9SLiron Himi 	.dev_stop = cn9k_ree_stop,
82672c00ae9SLiron Himi 	.dev_close = cn9k_ree_close,
82772c00ae9SLiron Himi 	.dev_attr_get = NULL,
82872c00ae9SLiron Himi 	.dev_attr_set = NULL,
82972c00ae9SLiron Himi 	.dev_rule_db_update = cn9k_ree_rule_db_update,
8306825dc23SThomas Monjalon 	.dev_rule_db_compile_activate = NULL,
83172c00ae9SLiron Himi 	.dev_db_import = cn9k_ree_rule_db_import,
83272c00ae9SLiron Himi 	.dev_db_export = cn9k_ree_rule_db_export,
83372c00ae9SLiron Himi 	.dev_xstats_names_get = NULL,
83472c00ae9SLiron Himi 	.dev_xstats_get = NULL,
83572c00ae9SLiron Himi 	.dev_xstats_by_name_get = NULL,
83672c00ae9SLiron Himi 	.dev_xstats_reset = NULL,
83772c00ae9SLiron Himi 	.dev_selftest = NULL,
83872c00ae9SLiron Himi 	.dev_dump = NULL,
83972c00ae9SLiron Himi };
84072c00ae9SLiron Himi 
84172c00ae9SLiron Himi static int
84272c00ae9SLiron Himi cn9k_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
84372c00ae9SLiron Himi 		   struct rte_pci_device *pci_dev)
84472c00ae9SLiron Himi {
84572c00ae9SLiron Himi 	char name[RTE_REGEXDEV_NAME_MAX_LEN];
84672c00ae9SLiron Himi 	struct cn9k_ree_data *data;
84772c00ae9SLiron Himi 	struct rte_regexdev *dev;
84872c00ae9SLiron Himi 	struct roc_ree_vf *vf;
84972c00ae9SLiron Himi 	int ret;
85072c00ae9SLiron Himi 
85172c00ae9SLiron Himi 	ret = roc_plt_init();
85272c00ae9SLiron Himi 	if (ret < 0) {
85372c00ae9SLiron Himi 		plt_err("Failed to initialize platform model");
85472c00ae9SLiron Himi 		return ret;
85572c00ae9SLiron Himi 	}
85672c00ae9SLiron Himi 
85772c00ae9SLiron Himi 	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
85872c00ae9SLiron Himi 
85972c00ae9SLiron Himi 	dev = ree_dev_register(name);
86072c00ae9SLiron Himi 	if (dev == NULL) {
86172c00ae9SLiron Himi 		ret = -ENODEV;
86272c00ae9SLiron Himi 		goto exit;
86372c00ae9SLiron Himi 	}
86472c00ae9SLiron Himi 
86572c00ae9SLiron Himi 	dev->dev_ops = &cn9k_ree_ops;
86672c00ae9SLiron Himi 	dev->device = &pci_dev->device;
86772c00ae9SLiron Himi 
86872c00ae9SLiron Himi 	/* Get private data space allocated */
86972c00ae9SLiron Himi 	data = dev->data->dev_private;
87072c00ae9SLiron Himi 	vf = &data->vf;
87172c00ae9SLiron Himi 	vf->pci_dev = pci_dev;
87272c00ae9SLiron Himi 	ret = roc_ree_dev_init(vf);
87372c00ae9SLiron Himi 	if (ret) {
87472c00ae9SLiron Himi 		plt_err("Failed to initialize roc cpt rc=%d", ret);
87572c00ae9SLiron Himi 		goto dev_unregister;
87672c00ae9SLiron Himi 	}
87772c00ae9SLiron Himi 
87872c00ae9SLiron Himi 	data->rule_flags = RTE_REGEX_PCRE_RULE_ALLOW_EMPTY_F |
87972c00ae9SLiron Himi 			RTE_REGEX_PCRE_RULE_ANCHORED_F;
88072c00ae9SLiron Himi 	data->regexdev_capa = 0;
88172c00ae9SLiron Himi 	data->max_groups = REE_MAX_GROUPS;
88272c00ae9SLiron Himi 	data->max_rules_per_group = REE_MAX_RULES_PER_GROUP;
88372c00ae9SLiron Himi 	data->nb_rules = 0;
88472c00ae9SLiron Himi 
88572c00ae9SLiron Himi 	dev->state = RTE_REGEXDEV_READY;
88672c00ae9SLiron Himi 	return 0;
88772c00ae9SLiron Himi 
88872c00ae9SLiron Himi dev_unregister:
88972c00ae9SLiron Himi 	ree_dev_unregister(dev);
89072c00ae9SLiron Himi exit:
89172c00ae9SLiron Himi 	cn9k_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
89272c00ae9SLiron Himi 		    pci_dev->id.vendor_id, pci_dev->id.device_id);
89372c00ae9SLiron Himi 	return ret;
89472c00ae9SLiron Himi }
89572c00ae9SLiron Himi 
89672c00ae9SLiron Himi static int
89772c00ae9SLiron Himi cn9k_ree_pci_remove(struct rte_pci_device *pci_dev)
89872c00ae9SLiron Himi {
89972c00ae9SLiron Himi 	char name[RTE_REGEXDEV_NAME_MAX_LEN];
90072c00ae9SLiron Himi 	struct rte_regexdev *dev = NULL;
90172c00ae9SLiron Himi 
90272c00ae9SLiron Himi 	if (pci_dev == NULL)
90372c00ae9SLiron Himi 		return -EINVAL;
90472c00ae9SLiron Himi 
90572c00ae9SLiron Himi 	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
90672c00ae9SLiron Himi 
90772c00ae9SLiron Himi 	dev = rte_regexdev_get_device_by_name(name);
90872c00ae9SLiron Himi 
90972c00ae9SLiron Himi 	if (dev == NULL)
91072c00ae9SLiron Himi 		return -ENODEV;
91172c00ae9SLiron Himi 
91272c00ae9SLiron Himi 	return ree_dev_fini(dev);
91372c00ae9SLiron Himi }
91472c00ae9SLiron Himi 
91572c00ae9SLiron Himi static struct rte_pci_id pci_id_ree_table[] = {
91672c00ae9SLiron Himi 	{
91772c00ae9SLiron Himi 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
91872c00ae9SLiron Himi 				PCI_DEVID_CNXK_RVU_REE_PF)
91972c00ae9SLiron Himi 	},
92072c00ae9SLiron Himi 	{
92172c00ae9SLiron Himi 		.vendor_id = 0,
92272c00ae9SLiron Himi 	}
92372c00ae9SLiron Himi };
92472c00ae9SLiron Himi 
92572c00ae9SLiron Himi static struct rte_pci_driver cn9k_regexdev_pmd = {
92672c00ae9SLiron Himi 	.id_table = pci_id_ree_table,
92772c00ae9SLiron Himi 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
92872c00ae9SLiron Himi 	.probe = cn9k_ree_pci_probe,
92972c00ae9SLiron Himi 	.remove = cn9k_ree_pci_remove,
93072c00ae9SLiron Himi };
93172c00ae9SLiron Himi 
93272c00ae9SLiron Himi 
93372c00ae9SLiron Himi RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_CN9K_PMD, cn9k_regexdev_pmd);
93472c00ae9SLiron Himi RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_CN9K_PMD, pci_id_ree_table);
935