/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include <rte_mempool.h>
#include <bus_pci_driver.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "mlx5_devx_cmds.h"
#include "../mlx5_common_log.h"
#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_malloc.h"

/**
 * Initialization routine for run-time dependency on external lib.
 */
void
mlx5_glue_constructor(void)
{
}

/**
 * Validate user arguments for remote PD and CTX.
 *
 * @param config
 *   Pointer to device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_remote_pd_and_ctx_validate(struct mlx5_common_dev_config *config)
{
	int device_fd = config->device_fd;
	int pd_handle = config->pd_handle;

	if (pd_handle != MLX5_ARG_UNSET || device_fd != MLX5_ARG_UNSET) {
		DRV_LOG(ERR, "Remote PD and CTX are not supported on Windows.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}

/**
 * Release PD. Releases a given mlx5_pd object.
 *
 * @param[in] cdev
 *   Pointer to the mlx5 device.
 *
 * @return
 *   Zero if pd is released successfully, negative number otherwise.
 */
int
mlx5_os_pd_release(struct mlx5_common_device *cdev)
{
	struct mlx5_pd *pd = cdev->pd;

	if (!pd)
		return -EINVAL;
	mlx5_devx_cmd_destroy(pd->obj);
	mlx5_free(pd);
	return 0;
}

/**
 * Allocate Protection Domain object and extract its pdn using DV API.
 *
 * @param[out] cdev
 *   Pointer to the mlx5 device.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_os_pd_prepare(struct mlx5_common_device *cdev)
{
	struct mlx5_pd *pd;

	pd = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pd), 0, SOCKET_ID_ANY);
	if (!pd)
		return -1;
	struct mlx5_devx_obj *obj = mlx5_devx_cmd_alloc_pd(cdev->ctx);
	if (!obj) {
		mlx5_free(pd);
		return -1;
	}
	pd->obj = obj;
	pd->pdn = obj->id;
	pd->devx_ctx = cdev->ctx;
	cdev->pd = pd;
	cdev->pdn = pd->pdn;
	return 0;
}

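/*
 * Usage sketch (illustrative only, not referenced by the driver code):
 * the PD helpers above are expected to be paired per device. The "cdev"
 * below is assumed to already hold a valid DevX context in cdev->ctx.
 *
 *	if (mlx5_os_pd_prepare(cdev) != 0)
 *		return -1;
 *	... cdev->pd and cdev->pdn are now usable for DevX object creation ...
 *	claim_zero(mlx5_os_pd_release(cdev));
 */
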
/**
 * Detect if a devx_device_bdf object has identical DBDF values to the
 * rte_pci_addr found in bus/pci probing.
 *
 * @param[in] devx_bdf
 *   Pointer to the devx_device_bdf structure.
 * @param[in] addr
 *   Pointer to the rte_pci_addr structure.
 *
 * @return
 *   1 on Device match, 0 on mismatch.
 */
static int
mlx5_match_devx_bdf_to_addr(struct devx_device_bdf *devx_bdf,
			    struct rte_pci_addr *addr)
{
	if (addr->domain != (devx_bdf->bus_id >> 8) ||
	    addr->bus != (devx_bdf->bus_id & 0xff) ||
	    addr->devid != devx_bdf->dev_id ||
	    addr->function != devx_bdf->fnc_id) {
		return 0;
	}
	return 1;
}

/**
 * Detect if a devx_device_bdf object matches the rte_pci_addr
 * found in bus/pci probing.
 * Compare both the Native/PF BDF and the raw_bdf representing a VF BDF.
 *
 * @param[in] devx_bdf
 *   Pointer to the devx_device_bdf structure.
 * @param[in] addr
 *   Pointer to the rte_pci_addr structure.
 *
 * @return
 *   1 on Device match, 0 on mismatch, rte_errno code on failure.
 */
static int
mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf,
				struct rte_pci_addr *addr)
{
	int err;
	struct devx_device mlx5_dev;

	if (mlx5_match_devx_bdf_to_addr(devx_bdf, addr))
		return 1;
	/*
	 * Didn't match on Native/PF BDF, could still match a VF BDF,
	 * check it next.
	 */
	err = mlx5_glue->query_device(devx_bdf, &mlx5_dev);
	if (err) {
		DRV_LOG(ERR, "query_device failed");
		rte_errno = err;
		return rte_errno;
	}
	if (mlx5_match_devx_bdf_to_addr(&mlx5_dev.raw_bdf, addr))
		return 1;
	return 0;
}

/**
 * Look for a DevX device that matches the given rte_device.
 *
 * @param dev
 *   Pointer to the generic device.
 * @param devx_list
 *   Pointer to head of DevX devices list.
 * @param n
 *   Number of devices in given DevX devices list.
 *
 * @return
 *   A matching device on success, NULL otherwise and rte_errno is set.
 */
static struct devx_device_bdf *
mlx5_os_get_devx_device(struct rte_device *dev,
			struct devx_device_bdf *devx_list, int n)
{
	struct devx_device_bdf *devx_match = NULL;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
	struct rte_pci_addr *addr = &pci_dev->addr;

	while (n-- > 0) {
		int ret = mlx5_match_devx_devices_to_addr(devx_list, addr);
		if (!ret) {
			devx_list++;
			continue;
		}
		if (ret != 1) {
			rte_errno = ret;
			return NULL;
		}
		devx_match = devx_list;
		break;
	}
	if (devx_match == NULL) {
		/* No device matches, just complain and bail out. */
		DRV_LOG(WARNING,
			"No DevX device matches PCI device " PCI_PRI_FMT ","
			" is DevX Configured?",
			addr->domain, addr->bus, addr->devid, addr->function);
		rte_errno = ENOENT;
	}
	return devx_match;
}

/**
 * Windows-specific API to open a device.
 *
 * This function calls the Windows glue APIs to open a device.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 * @param classes
 *   Chosen classes coming from user device arguments.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes)
{
	struct devx_device_bdf *devx_bdf_dev = NULL;
	struct devx_device_bdf *devx_list;
	struct mlx5_context *mlx5_ctx = NULL;
	int n;

	if (classes != MLX5_CLASS_ETH && classes != MLX5_CLASS_CRYPTO) {
		DRV_LOG(ERR,
			"The chosen classes are not supported on Windows.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	errno = 0;
	devx_list = mlx5_glue->get_device_list(&n);
	if (devx_list == NULL) {
		rte_errno = errno ? errno : ENOSYS;
		DRV_LOG(ERR, "Cannot list devices, is DevX enabled?");
		return -rte_errno;
	}
	devx_bdf_dev = mlx5_os_get_devx_device(cdev->dev, devx_list, n);
	if (devx_bdf_dev == NULL)
		goto error;
	/* Try to open DevX device with DV. */
	mlx5_ctx = mlx5_glue->open_device(devx_bdf_dev);
	if (mlx5_ctx == NULL) {
		DRV_LOG(ERR, "Failed to open DevX device.");
		rte_errno = errno;
		goto error;
	}
	if (mlx5_glue->query_device(devx_bdf_dev, &mlx5_ctx->mlx5_dev)) {
		DRV_LOG(ERR, "Failed to query device context fields.");
		rte_errno = errno;
		goto error;
	}
	cdev->config.devx = 1;
	cdev->ctx = mlx5_ctx;
	mlx5_glue->free_device_list(devx_list);
	return 0;
error:
	if (mlx5_ctx != NULL)
		claim_zero(mlx5_glue->close_device(mlx5_ctx));
	mlx5_glue->free_device_list(devx_list);
	return -rte_errno;
}

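/*
 * Usage sketch (illustrative only, not referenced by the driver code):
 * how a probe path is expected to call mlx5_os_open_device(). The classes
 * value comes from parsed device arguments; only the ETH and CRYPTO
 * classes are accepted on Windows.
 *
 *	if (mlx5_os_open_device(cdev, MLX5_CLASS_ETH) != 0)
 *		return -rte_errno;
 *	... cdev->ctx now holds the DevX context and cdev->config.devx == 1 ...
 */
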
/**
 * Register umem.
 *
 * @param[in] ctx
 *   Pointer to context.
 * @param[in] addr
 *   Pointer to memory start address.
 * @param[in] size
 *   Size of the memory to register.
 * @param[in] access
 *   UMEM access type.
 *
 * @return
 *   Pointer to the registered umem on success, NULL otherwise and errno is set.
 */
void *
mlx5_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access)
{
	struct mlx5_devx_umem *umem;

	umem = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*umem), 0, SOCKET_ID_ANY);
	if (!umem) {
		errno = ENOMEM;
		return NULL;
	}
	umem->umem_hdl = mlx5_glue->devx_umem_reg(ctx, addr, size, access,
						  &umem->umem_id);
	if (!umem->umem_hdl) {
		mlx5_free(umem);
		return NULL;
	}
	umem->addr = addr;
	return umem;
}

/**
 * Deregister umem.
 *
 * @param[in] pumem
 *   Pointer to umem.
 *
 * @return
 *   0 on successful release, negative number otherwise.
 */
int
mlx5_os_umem_dereg(void *pumem)
{
	struct mlx5_devx_umem *umem;
	int err = 0;

	if (!pumem)
		return err;
	umem = pumem;
	if (umem->umem_hdl)
		err = mlx5_glue->devx_umem_dereg(umem->umem_hdl);
	mlx5_free(umem);
	return err;
}

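/*
 * Usage sketch (illustrative only, not referenced by the driver code):
 * registering a buffer as a umem and releasing it. The "buf" and "size"
 * names below are hypothetical.
 *
 *	struct mlx5_devx_umem *umem;
 *
 *	umem = mlx5_os_umem_reg(cdev->ctx, buf, size,
 *				IBV_ACCESS_LOCAL_WRITE);
 *	if (umem == NULL)
 *		return -errno;
 *	... umem->umem_id can be passed to DevX queue creation ...
 *	claim_zero(mlx5_os_umem_dereg(umem));
 */
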
/**
 * Register MR. Given a protection domain pointer and a memory address and
 * length, register the memory region.
 *
 * @param[in] pd
 *   Pointer to protection domain context (type mlx5_pd).
 * @param[in] addr
 *   Pointer to memory start address.
 * @param[in] length
 *   Length of the memory to register.
 * @param[out] pmd_mr
 *   pmd_mr struct set with lkey, address, length, pointer to mr object, mkey.
 *
 * @return
 *   0 on successful registration, -1 otherwise.
 */
static int
mlx5_os_reg_mr(void *pd,
	       void *addr, size_t length, struct mlx5_pmd_mr *pmd_mr)
{
	struct mlx5_devx_mkey_attr mkey_attr;
	struct mlx5_pd *mlx5_pd = (struct mlx5_pd *)pd;
	struct mlx5_hca_attr attr;
	struct mlx5_devx_obj *mkey;
	void *obj;

	if (!pd || !addr) {
		rte_errno = EINVAL;
		return -1;
	}
	if (mlx5_devx_cmd_query_hca_attr(mlx5_pd->devx_ctx, &attr))
		return -1;
	obj = mlx5_os_umem_reg(mlx5_pd->devx_ctx, addr, length,
			       IBV_ACCESS_LOCAL_WRITE);
	if (!obj)
		return -1;
	memset(&mkey_attr, 0, sizeof(mkey_attr));
	mkey_attr.addr = (uintptr_t)addr;
	mkey_attr.size = length;
	mkey_attr.umem_id = ((struct mlx5_devx_umem *)(obj))->umem_id;
	mkey_attr.pd = mlx5_pd->pdn;
	if (!haswell_broadwell_cpu) {
		mkey_attr.relaxed_ordering_write = attr.relaxed_ordering_write;
		mkey_attr.relaxed_ordering_read = attr.relaxed_ordering_read;
	}
	mkey = mlx5_devx_cmd_mkey_create(mlx5_pd->devx_ctx, &mkey_attr);
	if (!mkey) {
		claim_zero(mlx5_os_umem_dereg(obj));
		return -1;
	}
	pmd_mr->addr = addr;
	pmd_mr->len = length;
	pmd_mr->obj = obj;
	pmd_mr->mkey = mkey;
	pmd_mr->lkey = pmd_mr->mkey->id;
	return 0;
}

/**
 * De-register MR.
 *
 * @param[in] pmd_mr
 *   Pointer to PMD mr object.
 */
static void
mlx5_os_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
{
	if (!pmd_mr)
		return;
	if (pmd_mr->mkey)
		claim_zero(mlx5_devx_cmd_destroy(pmd_mr->mkey));
	if (pmd_mr->obj)
		claim_zero(mlx5_os_umem_dereg(pmd_mr->obj));
	memset(pmd_mr, 0, sizeof(*pmd_mr));
}

/**
 * Set the reg_mr and dereg_mr callbacks.
 *
 * @param[out] reg_mr_cb
 *   Pointer to reg_mr func.
 * @param[out] dereg_mr_cb
 *   Pointer to dereg_mr func.
 */
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb)
{
	*reg_mr_cb = mlx5_os_reg_mr;
	*dereg_mr_cb = mlx5_os_dereg_mr;
}

/*
 * On Windows there is no need to wrap the MR, since there is no known kernel
 * issue with it. Use the regular function to create a direct MR.
 */
int
mlx5_os_wrapped_mkey_create(void *ctx, void *pd, uint32_t pdn, void *addr,
			    size_t length, struct mlx5_pmd_wrapped_mr *wpmd_mr)
{
	struct mlx5_pmd_mr pmd_mr = {0};
	int ret = mlx5_os_reg_mr(pd, addr, length, &pmd_mr);

	(void)pdn;
	(void)ctx;
	if (ret != 0)
		return -1;
	wpmd_mr->addr = addr;
	wpmd_mr->len = length;
	wpmd_mr->obj = pmd_mr.obj;
	wpmd_mr->imkey = pmd_mr.mkey;
	wpmd_mr->lkey = pmd_mr.mkey->id;
	return 0;
}

void
mlx5_os_wrapped_mkey_destroy(struct mlx5_pmd_wrapped_mr *wpmd_mr)
{
	struct mlx5_pmd_mr pmd_mr;

	if (!wpmd_mr)
		return;
	pmd_mr.addr = wpmd_mr->addr;
	pmd_mr.len = wpmd_mr->len;
	pmd_mr.obj = wpmd_mr->obj;
	pmd_mr.mkey = wpmd_mr->imkey;
	pmd_mr.lkey = wpmd_mr->lkey;
	mlx5_os_dereg_mr(&pmd_mr);
	memset(wpmd_mr, 0, sizeof(*wpmd_mr));
}

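/*
 * Usage sketch (illustrative only, not referenced by the driver code):
 * the MR callbacks are retrieved through mlx5_os_set_reg_mr_cb() and then
 * invoked with the device PD. The "buf" and "size" names are hypothetical.
 *
 *	mlx5_reg_mr_t reg_mr_cb;
 *	mlx5_dereg_mr_t dereg_mr_cb;
 *	struct mlx5_pmd_mr pmd_mr = {0};
 *
 *	mlx5_os_set_reg_mr_cb(&reg_mr_cb, &dereg_mr_cb);
 *	if (reg_mr_cb(cdev->pd, buf, size, &pmd_mr) != 0)
 *		return -1;
 *	... pmd_mr.lkey is used in data path descriptors ...
 *	dereg_mr_cb(&pmd_mr);
 */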