/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>
#include <rte_bus_pci.h>

#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_regex.h"
#include "mlx5_regex_utils.h"
#include "mlx5_rxp_csrs.h"

#define MLX5_REGEX_DRIVER_NAME regex_mlx5

int mlx5_regex_logtype;

TAILQ_HEAD(regex_mem_event, mlx5_regex_priv) mlx5_mem_event_list =
				TAILQ_HEAD_INITIALIZER(mlx5_mem_event_list);
static pthread_mutex_t mem_event_list_lock = PTHREAD_MUTEX_INITIALIZER;

const struct rte_regexdev_ops mlx5_regexdev_ops = {
	.dev_info_get = mlx5_regex_info_get,
	.dev_configure = mlx5_regex_configure,
	.dev_db_import = mlx5_regex_rules_db_import,
	.dev_qp_setup = mlx5_regex_qp_setup,
	.dev_start = mlx5_regex_start,
	.dev_stop = mlx5_regex_stop,
	.dev_close = mlx5_regex_close,
};

int
mlx5_regex_start(struct rte_regexdev *dev __rte_unused)
{
	return 0;
}

int
mlx5_regex_stop(struct rte_regexdev *dev __rte_unused)
{
	return 0;
}

int
mlx5_regex_close(struct rte_regexdev *dev __rte_unused)
{
	return 0;
}

/* Check that every RXP engine answers with the expected identifier CSR. */
static int
mlx5_regex_engines_status(struct ibv_context *ctx, int num_engines)
{
	uint32_t fpga_ident = 0;
	int err;
	int i;

	for (i = 0; i < num_engines; i++) {
		err = mlx5_devx_regex_register_read(ctx, i,
						    MLX5_RXP_CSR_IDENTIFIER,
						    &fpga_ident);
		fpga_ident = (fpga_ident & (0x0000FFFF));
		if (err || fpga_ident != MLX5_RXP_IDENTIFIER) {
			DRV_LOG(ERR, "Failed setup RXP %d err %d database "
				"memory 0x%x", i, err, fpga_ident);
			if (!err)
				err = EINVAL;
			return err;
		}
	}
	return 0;
}

/* Build the regexdev name from the underlying device name. */
static void
mlx5_regex_get_name(char *name, struct rte_device *dev)
{
	sprintf(name, "mlx5_regex_%s", dev->name);
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
static void
mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
			   size_t len, void *arg __rte_unused)
{
	struct mlx5_regex_priv *priv;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&mem_event_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)
			mlx5_free_mr_by_addr(&priv->mr_scache,
					     priv->ctx->device->name,
					     addr, len);
		pthread_mutex_unlock(&mem_event_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

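/*
 * Probe an mlx5 device for RegEx support: open a DevX context, verify the
 * HCA RegEx/MMO capabilities and the RXP engine identifier CSRs, then
 * allocate the private structure, register the regexdev and set up the
 * UAR, PD and MR cache used by the data path.
 */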
static int
mlx5_regex_dev_probe(struct rte_device *rte_dev)
{
	struct ibv_device *ibv;
	struct mlx5_regex_priv *priv = NULL;
	struct ibv_context *ctx = NULL;
	struct mlx5_hca_attr attr;
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	int ret;
	uint32_t val;

	ibv = mlx5_os_get_ibv_dev(rte_dev);
	if (ibv == NULL)
		return -rte_errno;
	DRV_LOG(INFO, "Probe device \"%s\".", ibv->name);
	ctx = mlx5_glue->dv_open_device(ibv);
	if (!ctx) {
		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
	if (ret) {
		DRV_LOG(ERR, "Unable to read HCA capabilities.");
		rte_errno = ENOTSUP;
		goto dev_error;
	} else if (((!attr.regex) && (!attr.mmo_regex_sq_en) &&
		    (!attr.mmo_regex_qp_en)) || attr.regexp_num_of_engines == 0) {
		DRV_LOG(ERR, "Not enough capabilities to support RegEx, maybe "
			"old FW/OFED version?");
		rte_errno = ENOTSUP;
		goto dev_error;
	}
	if (mlx5_regex_engines_status(ctx, 2)) {
		DRV_LOG(ERR, "RegEx engine error.");
		rte_errno = ENOMEM;
		goto dev_error;
	}
	priv = rte_zmalloc("mlx5 regex device private", sizeof(*priv),
			   RTE_CACHE_LINE_SIZE);
	if (!priv) {
		DRV_LOG(ERR, "Failed to allocate private memory.");
		rte_errno = ENOMEM;
		goto dev_error;
	}
	priv->mmo_regex_qp_cap = attr.mmo_regex_qp_en;
	priv->mmo_regex_sq_cap = attr.mmo_regex_sq_en;
	priv->qp_ts_format = attr.qp_ts_format;
	priv->ctx = ctx;
	priv->nb_engines = 2; /* attr.regexp_num_of_engines */
	ret = mlx5_devx_regex_register_read(priv->ctx, 0,
					    MLX5_RXP_CSR_IDENTIFIER, &val);
	if (ret) {
		DRV_LOG(ERR, "CSR read failed!");
		goto dev_error;
	}
	if (val == MLX5_RXP_BF2_IDENTIFIER)
		priv->is_bf2 = 1;
	/* Default RXP programming mode to Shared. */
	priv->prog_mode = MLX5_RXP_SHARED_PROG_MODE;
	mlx5_regex_get_name(name, rte_dev);
	priv->regexdev = rte_regexdev_register(name);
	if (priv->regexdev == NULL) {
		DRV_LOG(ERR, "Failed to register RegEx device.");
		rte_errno = rte_errno ? rte_errno : EINVAL;
		goto error;
	}
	/*
	 * This PMD always issues a write memory barrier on UAR register
	 * writes, so it is safe to allocate the UAR with any memory
	 * mapping type.
	 */
	priv->uar = mlx5_devx_alloc_uar(ctx, -1);
	if (!priv->uar) {
		DRV_LOG(ERR, "can't allocate uar.");
		rte_errno = ENOMEM;
		goto error;
	}
	priv->pd = mlx5_glue->alloc_pd(ctx);
	if (!priv->pd) {
		DRV_LOG(ERR, "can't allocate pd.");
		rte_errno = ENOMEM;
		goto error;
	}
	priv->regexdev->dev_ops = &mlx5_regexdev_ops;
	priv->regexdev->enqueue = mlx5_regexdev_enqueue;
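	/*
	 * If the firmware allows UMR indirect mkeys with a modifiable
	 * entity size, switch to the GGA enqueue path, which posts UMR
	 * WQEs so that scattered (multi-segment) mbufs can be used as
	 * job buffers.
	 */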
#ifdef HAVE_MLX5_UMR_IMKEY
	if (!attr.umr_indirect_mkey_disabled &&
	    !attr.umr_modify_entity_size_disabled)
		priv->has_umr = 1;
	if (priv->has_umr)
		priv->regexdev->enqueue = mlx5_regexdev_enqueue_gga;
#endif
	priv->regexdev->dequeue = mlx5_regexdev_dequeue;
	priv->regexdev->device = rte_dev;
	priv->regexdev->data->dev_private = priv;
	priv->regexdev->state = RTE_REGEXDEV_READY;
	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
	ret = mlx5_mr_btree_init(&priv->mr_scache.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 rte_socket_id());
	if (ret) {
		DRV_LOG(ERR, "MR init tree failed.");
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&mlx5_mem_event_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_regex_mr_mem_event_cb,
						NULL);
	/* Add device to memory callback list. */
	pthread_mutex_lock(&mem_event_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_mem_event_list, priv, mem_event_cb);
	pthread_mutex_unlock(&mem_event_list_lock);
	DRV_LOG(INFO, "RegEx GGA is %s.",
		priv->has_umr ? "supported" : "unsupported");
	return 0;

error:
	if (priv->pd)
		mlx5_glue->dealloc_pd(priv->pd);
	if (priv->uar)
		mlx5_glue->devx_free_uar(priv->uar);
	if (priv->regexdev)
		rte_regexdev_unregister(priv->regexdev);
dev_error:
	if (ctx)
		mlx5_glue->close_device(ctx);
	if (priv)
		rte_free(priv);
	return -rte_errno;
}

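/*
 * Undo probe in roughly reverse order: drop the device from the memory
 * event list, release the MR cache, PD and UAR, unregister the regexdev
 * and close the DevX context.
 */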
static int
mlx5_regex_dev_remove(struct rte_device *rte_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct rte_regexdev *dev;
	struct mlx5_regex_priv *priv = NULL;

	mlx5_regex_get_name(name, rte_dev);
	dev = rte_regexdev_get_device_by_name(name);
	if (!dev)
		return 0;
	priv = dev->data->dev_private;
	if (priv) {
		/* Remove from memory callback device list. */
		pthread_mutex_lock(&mem_event_list_lock);
		TAILQ_REMOVE(&mlx5_mem_event_list, priv, mem_event_cb);
		pthread_mutex_unlock(&mem_event_list_lock);
		if (TAILQ_EMPTY(&mlx5_mem_event_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		if (priv->mr_scache.cache.table)
			mlx5_mr_release_cache(&priv->mr_scache);
		if (priv->pd)
			mlx5_glue->dealloc_pd(priv->pd);
		if (priv->uar)
			mlx5_glue->devx_free_uar(priv->uar);
		if (priv->regexdev)
			rte_regexdev_unregister(priv->regexdev);
		if (priv->ctx)
			mlx5_glue->close_device(priv->ctx);
		rte_free(priv);
	}
	return 0;
}

static const struct rte_pci_id mlx5_regex_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_class_driver mlx5_regex_driver = {
	.drv_class = MLX5_CLASS_REGEX,
	.name = RTE_STR(MLX5_REGEX_DRIVER_NAME),
	.id_table = mlx5_regex_pci_id_map,
	.probe = mlx5_regex_dev_probe,
	.remove = mlx5_regex_dev_remove,
};

RTE_INIT(rte_mlx5_regex_init)
{
	mlx5_common_init();
	if (mlx5_glue)
		mlx5_class_driver_register(&mlx5_regex_driver);
}

RTE_LOG_REGISTER_DEFAULT(mlx5_regex_logtype, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_REGEX_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_REGEX_DRIVER_NAME, mlx5_regex_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_REGEX_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");