/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>


/* REE common headers */
#include "cn9k_regexdev.h"
#include "cn9k_regexdev_compiler.h"


/* HW matches are at offset 0x80 from RES_PTR_ADDR.
 * In the op structure matches start at W5 (0x28).
 * The tail matches must therefore be copied back from 0x80 to 0x28.
 * That gap is 0x80 - 0x28 = 0x58 = 88 B; each match holds 8 B, so up
 * to 11 matches can be copied.
 */
#define REE_NUM_MATCHES_ALIGN	11
/* The REE co-processor will write up to 254 job match structures
 * (REE_MATCH_S) starting at address [RES_PTR_ADDR] + 0x80.
 */
#define REE_MATCH_OFFSET	0x80

#define REE_MAX_RULES_PER_GROUP 0xFFFF
#define REE_MAX_GROUPS 0xFFFF


#define REE_RULE_DB_VERSION	2
#define REE_RULE_DB_REVISION	0

struct ree_rule_db_entry {
	uint8_t type;
	uint32_t addr;
	uint64_t value;
};

struct ree_rule_db {
	uint32_t version;
	uint32_t revision;
	uint32_t number_of_entries;
	struct ree_rule_db_entry entries[];
} __rte_packed;

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "cn9k_ree_lf_mem_%u:%u", dev_id, qp_id);
}

static struct roc_ree_qp *
ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
	struct roc_ree_vf *vf = &data->vf;
	const struct rte_memzone *lf_mem;
	uint32_t len, iq_len, size_div2;
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t used_len, iova;
	struct roc_ree_qp *qp;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc("CN9K Regex PMD Queue Pair", sizeof(*qp),
			ROC_ALIGN);
	if (qp == NULL) {
		cn9k_err("Could not allocate queue pair");
		return NULL;
	}

	iq_len = REE_IQ_LEN;

	/*
	 * Queue size must be in units of 128 B, i.e. 2 * REE_INST_S
	 * (where REE_INST_S is 64 B), and a power of 2.
	 * The effective queue size seen by software is (size - 1) * 128.
	 */
	size_div2 = iq_len >> 1;

	/* For pending queue */
	len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += REE_IQ_LEN * sizeof(union roc_ree_inst);

	/* Waste after instruction queues */
	len = RTE_ALIGN(len, pg_sz);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			qp_id);

	lf_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
			RTE_CACHE_LINE_SIZE);
	if (lf_mem == NULL) {
		cn9k_err("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = lf_mem->addr;
	iova = lf_mem->iova;

	memset(va, 0, len);

	/* Initialize pending queue */
	qp->pend_q.rid_queue = (struct roc_ree_rid *)va;
	qp->pend_q.enq_tail = 0;
	qp->pend_q.deq_head = 0;
	qp->pend_q.pending_count = 0;

	used_len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
	used_len = RTE_ALIGN(used_len, pg_sz);
	iova += used_len;

	qp->iq_dma_addr = iova;
	qp->id = qp_id;
	qp->base = roc_ree_qp_get_base(vf, qp_id);
	qp->roc_regexdev_jobid = 0;
	qp->write_offset = 0;

	ret = roc_ree_iq_enable(vf, qp, REE_QUEUE_HI_PRIO, size_div2);
	if (ret) {
		cn9k_err("Could not enable instruction queue");
		goto qp_free;
	}

	return qp;

qp_free:
	rte_free(qp);
	return NULL;
}

static int
ree_qp_destroy(const struct rte_regexdev *dev, struct roc_ree_qp *qp)
{
	const struct rte_memzone *lf_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	roc_ree_iq_disable(qp);

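	/* The LF memzone was reserved under a deterministic per-queue name
	 * in ree_qp_create(), so it can be found again here by rebuilding
	 * that name and looking it up for release.
	 */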
	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			qp->id);

	lf_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(lf_mem);
	if (ret)
		return ret;

	rte_free(qp);

	return 0;
}

static int
ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	int ret;

	ree_func_trace("Queue=%d", qp_id);

	if (qp == NULL)
		return -EINVAL;

	ret = ree_qp_destroy(dev, qp);
	if (ret) {
		cn9k_err("Could not destroy queue pair %d", qp_id);
		return ret;
	}

	data->queue_pairs[qp_id] = NULL;

	return 0;
}

static struct rte_regexdev *
ree_dev_register(const char *name)
{
	struct rte_regexdev *dev;

	cn9k_ree_dbg("Creating regexdev %s", name);

	/* allocate device structure */
	dev = rte_regexdev_register(name);
	if (dev == NULL) {
		cn9k_err("Failed to allocate regex device for %s", name);
		return NULL;
	}

	/* allocate private device structure */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dev->data->dev_private =
			rte_zmalloc_socket("regexdev device private",
					sizeof(struct cn9k_ree_data),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (dev->data->dev_private == NULL) {
			cn9k_err("Cannot allocate memory for dev %s private data",
					name);

			rte_regexdev_unregister(dev);
			return NULL;
		}
	}

	return dev;
}

static int
ree_dev_unregister(struct rte_regexdev *dev)
{
	cn9k_ree_dbg("Closing regex device %s", dev->device->name);

	/* free regex device */
	rte_regexdev_unregister(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(dev->data->dev_private);

	return 0;
}

static int
ree_dev_fini(struct rte_regexdev *dev)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	int i, ret;

	ree_func_trace();

	for (i = 0; i < data->nb_queue_pairs; i++) {
		ret = ree_queue_pair_release(dev, i);
		if (ret)
			return ret;
	}

	ret = roc_ree_queues_detach(vf);
	if (ret)
		cn9k_err("Could not detach queues");

	/* TEMP : should be in lib */
	if (data->queue_pairs)
		rte_free(data->queue_pairs);
	if (data->rules)
		rte_free(data->rules);

	roc_ree_dev_fini(vf);

	ret = ree_dev_unregister(dev);
	if (ret)
		cn9k_err("Could not destroy PMD");

	return ret;
}

static inline int
ree_enqueue(struct roc_ree_qp *qp, struct rte_regex_ops *op,
		struct roc_ree_pending_queue *pend_q)
{
	union roc_ree_inst inst;
	union ree_res *res;
	uint32_t offset;

	if (unlikely(pend_q->pending_count >= REE_DEFAULT_CMD_QLEN)) {
		cn9k_err("Pending count %" PRIu64 " is greater than Q size %d",
				pend_q->pending_count, REE_DEFAULT_CMD_QLEN);
		return -EAGAIN;
	}
	if (unlikely(op->mbuf->data_len > REE_MAX_PAYLOAD_SIZE ||
			op->mbuf->data_len == 0)) {
		cn9k_err("Packet length %d is zero or greater than max payload %d",
				op->mbuf->data_len, REE_MAX_PAYLOAD_SIZE);
		return -EAGAIN;
	}

	/* W 0 */
	inst.cn98xx.ooj = 1;
	inst.cn98xx.dg = 0;
	inst.cn98xx.doneint = 0;
	/* W 1 */
	inst.cn98xx.inp_ptr_addr = rte_pktmbuf_mtod(op->mbuf, uint64_t);
	/* W 2 */
	inst.cn98xx.inp_ptr_ctl = op->mbuf->data_len & 0x7FFF;
	inst.cn98xx.inp_ptr_ctl = inst.cn98xx.inp_ptr_ctl << 32;

	/* W 3 - HW writes the result header over the start of the op */
	inst.cn98xx.res_ptr_addr = (uint64_t)op;
	/* W 4 */
	inst.cn98xx.wq_ptr = 0;
	/* W 5 */
	inst.cn98xx.ggrp = 0;
	inst.cn98xx.tt = 0;
	inst.cn98xx.tag = 0;
	/* W 6 */
	inst.cn98xx.ree_job_length = op->mbuf->data_len & 0x7FFF;
	if (op->req_flags & RTE_REGEX_OPS_REQ_STOP_ON_MATCH_F)
		inst.cn98xx.ree_job_ctrl = (0x2 << 8);
	else if (op->req_flags & RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F)
		inst.cn98xx.ree_job_ctrl = (0x1 << 8);
	else
		inst.cn98xx.ree_job_ctrl = 0;
	inst.cn98xx.ree_job_id = qp->roc_regexdev_jobid;
	/* W 7 */
	inst.cn98xx.ree_job_subset_id_0 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F)
		inst.cn98xx.ree_job_subset_id_1 = op->group_id1;
	else
		inst.cn98xx.ree_job_subset_id_1 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F)
		inst.cn98xx.ree_job_subset_id_2 = op->group_id2;
	else
		inst.cn98xx.ree_job_subset_id_2 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)
		inst.cn98xx.ree_job_subset_id_3 = op->group_id3;
	else
		inst.cn98xx.ree_job_subset_id_3 = op->group_id0;

	/* Copy REE command to Q */
	offset = qp->write_offset * sizeof(inst);
	memcpy((void *)(qp->iq_dma_addr + offset), &inst, sizeof(inst));

	pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)op;
	pend_q->rid_queue[pend_q->enq_tail].user_id = op->user_id;

	/* Mark result as not done */
	res = (union ree_res *)(op);
	res->s.done = 0;
	res->s.ree_err = 0;

	/* We will use soft queue length here to limit requests */
	REE_MOD_INC(pend_q->enq_tail, REE_DEFAULT_CMD_QLEN);
	pend_q->pending_count += 1;
	REE_MOD_INC(qp->roc_regexdev_jobid, 0xFFFFFF);
	REE_MOD_INC(qp->write_offset, REE_IQ_LEN);

	return 0;
}

static uint16_t
cn9k_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	struct roc_ree_pending_queue *pend_q;
	uint16_t nb_allowed, count = 0;
	struct rte_regex_ops *op;
	int ret;

	pend_q = &qp->pend_q;

	nb_allowed = REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
	if (nb_ops > nb_allowed)
		nb_ops = nb_allowed;

	for (count = 0; count < nb_ops; count++) {
		op = ops[count];
		ret = ree_enqueue(qp, op, pend_q);

		if (unlikely(ret))
			break;
	}

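	/* All successfully queued instructions are advertised to HW with a
	 * single doorbell write below, amortizing the MMIO cost over the
	 * whole burst.
	 */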
	/*
	 * Make sure all instructions are written before DOORBELL is activated
	 */
	rte_io_wmb();

	/* Update Doorbell */
	plt_write64(count, qp->base + REE_LF_DOORBELL);

	return count;
}

static inline void
ree_dequeue_post_process(struct rte_regex_ops *ops)
{
	uint8_t ree_res_mcnt, ree_res_dmcnt;
	int off = REE_MATCH_OFFSET;
	struct ree_res_s_98 *res;
	uint16_t ree_res_status;
	uint64_t match;

	res = (struct ree_res_s_98 *)ops;
	/* store res values on stack since ops and res
	 * are using the same memory
	 */
	ree_res_status = res->ree_res_status;
	ree_res_mcnt = res->ree_res_mcnt;
	ree_res_dmcnt = res->ree_res_dmcnt;
	ops->rsp_flags = 0;
	ops->nb_actual_matches = ree_res_dmcnt;
	ops->nb_matches = ree_res_mcnt;
	if (unlikely(res->ree_err)) {
		ops->nb_actual_matches = 0;
		ops->nb_matches = 0;
	}

	if (unlikely(ree_res_status != REE_TYPE_RESULT_DESC)) {
		if (ree_res_status & REE_STATUS_PMI_SOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
		if (ree_res_status & REE_STATUS_PMI_EOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
		if (ree_res_status & REE_STATUS_ML_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
		if (ree_res_status & REE_STATUS_MM_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
		if (ree_res_status & REE_STATUS_MP_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
	}
	if (ops->nb_matches > 0) {
		/* Move the matches to the correct offset */
		off = ((ops->nb_matches < REE_NUM_MATCHES_ALIGN) ?
				ops->nb_matches : REE_NUM_MATCHES_ALIGN);
		match = (uint64_t)ops + REE_MATCH_OFFSET;
		match += (ops->nb_matches - off) *
			sizeof(union ree_match);
		memcpy((void *)ops->matches, (void *)match,
				off * sizeof(union ree_match));
	}
}

static uint16_t
cn9k_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	struct roc_ree_pending_queue *pend_q;
	int i, nb_pending, nb_completed = 0;
	volatile struct ree_res_s_98 *res;
	struct roc_ree_rid *rid;

	pend_q = &qp->pend_q;

	nb_pending = pend_q->pending_count;

	if (nb_ops > nb_pending)
		nb_ops = nb_pending;

	for (i = 0; i < nb_ops; i++) {
		rid = &pend_q->rid_queue[pend_q->deq_head];
		res = (volatile struct ree_res_s_98 *)(rid->rid);

		/* Check the response header done bit to see if the job
		 * has completed
		 */
		if (unlikely(!res->done))
			break;

		ops[i] = (struct rte_regex_ops *)(rid->rid);
		ops[i]->user_id = rid->user_id;

		REE_MOD_INC(pend_q->deq_head, REE_DEFAULT_CMD_QLEN);
		pend_q->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++)
		ree_dequeue_post_process(ops[i]);

	return nb_completed;
}

static int
cn9k_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;

	ree_func_trace();

	if (info == NULL)
		return -EINVAL;

	info->driver_name = dev->device->driver->name;
	info->dev = dev->device;

	info->max_queue_pairs = vf->max_queues;
	info->max_matches = vf->max_matches;
	info->max_payload_size = REE_MAX_PAYLOAD_SIZE;
	info->max_rules_per_group = data->max_rules_per_group;
	info->max_groups = data->max_groups;
	info->regexdev_capa = data->regexdev_capa;
	info->rule_flags = data->rule_flags;

	return 0;
}

static int
cn9k_ree_dev_config(struct rte_regexdev *dev,
		const struct rte_regexdev_config *cfg)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	const struct ree_rule_db *rule_db;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	if (cfg->nb_queue_pairs > vf->max_queues) {
		cn9k_err("Invalid number of queue pairs requested");
		return -EINVAL;
	}

	if (cfg->nb_max_matches != vf->max_matches) {
		cn9k_err("Invalid number of max matches requested");
		return -EINVAL;
	}

	if (cfg->dev_cfg_flags != 0) {
		cn9k_err("Invalid device configuration flags requested");
		return -EINVAL;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		roc_ree_err_intr_unregister(vf);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = roc_ree_queues_detach(vf);
		if (ret) {
			cn9k_err("Could not detach REE queues");
			return ret;
		}
	}

	/* TEMP : should be in lib */
	if (data->queue_pairs == NULL) { /* first time configuration */
		data->queue_pairs = rte_zmalloc("regexdev->queue_pairs",
				sizeof(data->queue_pairs[0]) *
				cfg->nb_queue_pairs, RTE_CACHE_LINE_SIZE);

		if (data->queue_pairs == NULL) {
			data->nb_queue_pairs = 0;
			cn9k_err("Failed to get memory for qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}
	} else { /* re-configure */
		uint16_t old_nb_queues = data->nb_queue_pairs;
		void **qp;
		unsigned int i;

		qp = data->queue_pairs;

		for (i = cfg->nb_queue_pairs; i < old_nb_queues; i++) {
			ret = ree_queue_pair_release(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * cfg->nb_queue_pairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			cn9k_err("Failed to realloc qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}

		if (cfg->nb_queue_pairs > old_nb_queues) {
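			/* Zero only the newly added slots; surviving queue
			 * pairs keep their state across the realloc.
			 */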
			uint16_t new_qs = cfg->nb_queue_pairs -
					old_nb_queues;
			memset(qp + old_nb_queues, 0, sizeof(qp[0]) * new_qs);
		}

		data->queue_pairs = qp;
	}
	data->nb_queue_pairs = cfg->nb_queue_pairs;

	/* Attach queues */
	cn9k_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
	ret = roc_ree_queues_attach(vf, cfg->nb_queue_pairs);
	if (ret) {
		cn9k_err("Could not attach queues");
		return -ENODEV;
	}

	ret = roc_ree_msix_offsets_get(vf);
	if (ret) {
		cn9k_err("Could not get MSI-X offsets");
		goto queues_detach;
	}

	if (cfg->rule_db && cfg->rule_db_len) {
		cn9k_ree_dbg("rule_db length %d", cfg->rule_db_len);
		rule_db = (const struct ree_rule_db *)cfg->rule_db;
		rule_db_len = rule_db->number_of_entries *
				sizeof(struct ree_rule_db_entry);
		cn9k_ree_dbg("rule_db number of entries %d",
				rule_db->number_of_entries);
		if (rule_db_len > cfg->rule_db_len) {
			cn9k_err("Could not program rule db");
			ret = -EINVAL;
			goto queues_detach;
		}
		ret = roc_ree_rule_db_prog(vf, (const char *)rule_db->entries,
				rule_db_len, NULL, REE_NON_INC_PROG);
		if (ret) {
			cn9k_err("Could not program rule db");
			goto queues_detach;
		}
	}

	dev->enqueue = cn9k_ree_enqueue_burst;
	dev->dequeue = cn9k_ree_dequeue_burst;

	rte_mb();
	return 0;

queues_detach:
	roc_ree_queues_detach(vf);
	return ret;
}

static int
cn9k_ree_stop(struct rte_regexdev *dev)
{
	RTE_SET_USED(dev);

	ree_func_trace();
	return 0;
}

static int
cn9k_ree_start(struct rte_regexdev *dev)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	uint32_t rule_db_len = 0;
	int ret;

	ree_func_trace();

	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, NULL);
	if (ret)
		return ret;
	if (rule_db_len == 0) {
		cn9k_err("Rule db not programmed");
		return -EFAULT;
	}
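	/* Start succeeds only once a rule database has been programmed:
	 * via the rule_db passed to dev_configure, a dev_db_import, or a
	 * dev_rule_db_compile_activate.
	 */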

	return 0;
}

static int
cn9k_ree_close(struct rte_regexdev *dev)
{
	return ree_dev_fini(dev);
}

static int
cn9k_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
		const struct rte_regexdev_qp_conf *qp_conf)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp;

	ree_func_trace("Queue=%d", qp_id);

	if (data->queue_pairs[qp_id] != NULL)
		ree_queue_pair_release(dev, qp_id);

	if (qp_conf->nb_desc > REE_DEFAULT_CMD_QLEN) {
		cn9k_err("Could not setup queue pair for %u descriptors",
				qp_conf->nb_desc);
		return -EINVAL;
	}
	if (qp_conf->qp_conf_flags != 0) {
		cn9k_err("Could not setup queue pair with configuration flags 0x%x",
				qp_conf->qp_conf_flags);
		return -EINVAL;
	}

	qp = ree_qp_create(dev, qp_id);
	if (qp == NULL) {
		cn9k_err("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}
	data->queue_pairs[qp_id] = qp;

	return 0;
}

static int
cn9k_ree_rule_db_compile_activate(struct rte_regexdev *dev)
{
	return cn9k_ree_rule_db_compile_prog(dev);
}

static int
cn9k_ree_rule_db_update(struct rte_regexdev *dev,
		const struct rte_regexdev_rule *rules, uint16_t nb_rules)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct rte_regexdev_rule *old_ptr;
	uint32_t i, sum_nb_rules;

	ree_func_trace("nb_rules=%d", nb_rules);

	for (i = 0; i < nb_rules; i++) {
		if (rules[i].op == RTE_REGEX_RULE_OP_REMOVE)
			break;
		if (rules[i].group_id >= data->max_groups)
			break;
		if (rules[i].rule_id >= data->max_rules_per_group)
			break;
		/* logical implication
		 * p q  p -> q
		 * 0 0    1
		 * 0 1    1
		 * 1 0    0
		 * 1 1    1
		 */
		if ((~(rules[i].rule_flags) | data->rule_flags) == 0)
			break;
	}
	nb_rules = i;

	if (data->nb_rules == 0) {

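		/* First update: allocate a shadow copy of the accepted
		 * rules. They are only compiled and programmed to HW when
		 * dev_rule_db_compile_activate is called.
		 */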
		data->rules = rte_malloc("rte_regexdev_rules",
				nb_rules * sizeof(struct rte_regexdev_rule), 0);
		if (data->rules == NULL)
			return -ENOMEM;

		memcpy(data->rules, rules,
				nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = nb_rules;
	} else {

		old_ptr = data->rules;
		sum_nb_rules = data->nb_rules + nb_rules;
		data->rules = rte_realloc(data->rules,
				sum_nb_rules * sizeof(struct rte_regexdev_rule),
				0);
		if (data->rules == NULL) {
			data->rules = old_ptr;
			return -ENOMEM;
		}
		memcpy(&data->rules[data->nb_rules], rules,
				nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = sum_nb_rules;
	}
	return nb_rules;
}

static int
cn9k_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
		uint32_t rule_db_len)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	const struct ree_rule_db *ree_rule_db;
	uint32_t ree_rule_db_len;
	int ret;

	ree_func_trace("rule_db_len=%d", rule_db_len);

	ree_rule_db = (const struct ree_rule_db *)rule_db;
	ree_rule_db_len = ree_rule_db->number_of_entries *
			sizeof(struct ree_rule_db_entry);
	if (ree_rule_db_len > rule_db_len) {
		cn9k_err("Could not program rule db");
		return -EINVAL;
	}
	ret = roc_ree_rule_db_prog(vf, (const char *)ree_rule_db->entries,
			ree_rule_db_len, NULL, REE_NON_INC_PROG);
	if (ret) {
		cn9k_err("Could not program rule db");
		return -ENOSPC;
	}
	return 0;
}

static int
cn9k_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	struct ree_rule_db *ree_rule_db;
	uint32_t rule_dbi_len;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, &rule_dbi_len);
	if (ret)
		return ret;

	if (rule_db == NULL) {
		rule_db_len += sizeof(struct ree_rule_db);
		return rule_db_len;
	}

	ree_rule_db = (struct ree_rule_db *)rule_db;
	ret = roc_ree_rule_db_get(vf, (char *)ree_rule_db->entries,
			rule_db_len, NULL, 0);
	if (ret) {
		cn9k_err("Could not export rule db");
		return -EFAULT;
	}
	ree_rule_db->number_of_entries =
			rule_db_len / sizeof(struct ree_rule_db_entry);
	ree_rule_db->revision = REE_RULE_DB_REVISION;
	ree_rule_db->version = REE_RULE_DB_VERSION;

	return 0;
}

static struct rte_regexdev_ops cn9k_ree_ops = {
	.dev_info_get = cn9k_ree_dev_info_get,
	.dev_configure = cn9k_ree_dev_config,
	.dev_qp_setup = cn9k_ree_queue_pair_setup,
	.dev_start = cn9k_ree_start,
	.dev_stop = cn9k_ree_stop,
	.dev_close = cn9k_ree_close,
	.dev_attr_get = NULL,
	.dev_attr_set = NULL,
	.dev_rule_db_update = cn9k_ree_rule_db_update,
	.dev_rule_db_compile_activate =
			cn9k_ree_rule_db_compile_activate,
	.dev_db_import = cn9k_ree_rule_db_import,
	.dev_db_export = cn9k_ree_rule_db_export,
	.dev_xstats_names_get = NULL,
	.dev_xstats_get = NULL,
	.dev_xstats_by_name_get = NULL,
	.dev_xstats_reset = NULL,
	.dev_selftest = NULL,
	.dev_dump = NULL,
};

static int
cn9k_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct cn9k_ree_data *data;
	struct rte_regexdev *dev;
	struct roc_ree_vf *vf;
	int ret;

	ret = roc_plt_init();
	if (ret < 0) {
		plt_err("Failed to initialize platform model");
		return ret;
	}

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = ree_dev_register(name);
	if (dev == NULL) {
		ret = -ENODEV;
		goto exit;
	}

	dev->dev_ops = &cn9k_ree_ops;
	dev->device = &pci_dev->device;

	/* Get private data space allocated */
	data = dev->data->dev_private;
	vf = &data->vf;
	vf->pci_dev = pci_dev;
	ret = roc_ree_dev_init(vf);
	if (ret) {
		plt_err("Failed to initialize roc ree rc=%d", ret);
		goto dev_unregister;
	}

	data->rule_flags = RTE_REGEX_PCRE_RULE_ALLOW_EMPTY_F |
			RTE_REGEX_PCRE_RULE_ANCHORED_F;
	data->regexdev_capa = 0;
	data->max_groups = REE_MAX_GROUPS;
	data->max_rules_per_group = REE_MAX_RULES_PER_GROUP;
	data->nb_rules = 0;

	dev->state = RTE_REGEXDEV_READY;
	return 0;

dev_unregister:
	ree_dev_unregister(dev);
exit:
	cn9k_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
			pci_dev->id.vendor_id, pci_dev->id.device_id);
	return ret;
}

static int
cn9k_ree_pci_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct rte_regexdev *dev = NULL;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = rte_regexdev_get_device_by_name(name);

	if (dev == NULL)
		return -ENODEV;

	return ree_dev_fini(dev);
}

static struct rte_pci_id pci_id_ree_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVID_CNXK_RVU_REE_PF)
	},
	{
		.vendor_id = 0,
	}
};

static struct rte_pci_driver cn9k_regexdev_pmd = {
	.id_table = pci_id_ree_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = cn9k_ree_pci_probe,
	.remove = cn9k_ree_pci_remove,
};


RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_CN9K_PMD, cn9k_regexdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_CN9K_PMD, pci_id_ree_table);
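
/*
 * Illustrative usage sketch (a hedged example, not part of the driver):
 * a minimal application flow against this PMD through the generic
 * regexdev API. Error handling is omitted; "dev_id", "rule_db",
 * "rule_db_len" and the "ops" array are assumed to be set up by the
 * caller, with the rule database built by the cn9k compiler or taken
 * from a previous dev_db_export.
 *
 *	struct rte_regexdev_qp_conf qp_conf = {
 *		.qp_conf_flags = 0,
 *		.nb_desc = 128,	(must not exceed REE_DEFAULT_CMD_QLEN)
 *	};
 *	struct rte_regexdev_info info;
 *	struct rte_regexdev_config cfg;
 *	uint16_t nb;
 *
 *	rte_regexdev_info_get(dev_id, &info);
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.nb_queue_pairs = 1;
 *	cfg.nb_max_matches = info.max_matches;	(must equal the HW limit)
 *	cfg.rule_db = rule_db;
 *	cfg.rule_db_len = rule_db_len;
 *	cfg.dev_cfg_flags = 0;
 *	rte_regexdev_configure(dev_id, &cfg);
 *	rte_regexdev_queue_pair_setup(dev_id, 0, &qp_conf);
 *	rte_regexdev_start(dev_id);
 *
 *	nb = rte_regexdev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	nb = rte_regexdev_dequeue_burst(dev_id, 0, ops, nb);
 */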