xref: /dpdk/drivers/bus/cdx/cdx_vfio.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 */

/**
 * @file
 * CDX probing using Linux VFIO.
 *
 * This code determines whether the CDX device is bound to the VFIO driver
 * and, if so, initializes it (maps MMIO regions, sets up interrupts).
 */

#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <rte_eal_paging.h>
#include <rte_malloc.h>
#include <rte_vfio.h>

#include "bus_cdx_driver.h"
#include "cdx_logs.h"
#include "private.h"

/**
 * A structure describing a CDX mapping.
 */
struct cdx_map {
	void *addr;
	char *path;
	uint64_t offset;
	uint64_t size;
};

/**
 * A structure describing a mapped CDX resource.
 * For multi-process we need to reproduce all CDX mappings in secondary
 * processes, so save them in a tailq.
 */
struct mapped_cdx_resource {
	TAILQ_ENTRY(mapped_cdx_resource) next;
	char name[RTE_DEV_NAME_MAX_LEN];      /**< CDX device name */
	char path[PATH_MAX];
	int nb_maps;
	struct cdx_map maps[RTE_CDX_MAX_RESOURCE];
};

/** Mapped CDX device list. */
TAILQ_HEAD(mapped_cdx_res_list, mapped_cdx_resource);

/* IRQ set buffer length for MSI interrupts */
#define MSI_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
			      sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))

static struct rte_tailq_elem cdx_vfio_tailq = {
	.name = "VFIO_CDX_RESOURCE_LIST",
};
EAL_REGISTER_TAILQ(cdx_vfio_tailq)

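/* Find the mapped resource entry for a device in the tailq and unmap all of
 * its MMIO regions. Returns the entry (still linked in the list) or NULL if
 * the device is not found.
 */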
static struct mapped_cdx_resource *
cdx_vfio_find_and_unmap_resource(struct mapped_cdx_res_list *vfio_res_list,
		struct rte_cdx_device *dev)
{
	struct mapped_cdx_resource *vfio_res = NULL;
	const char *dev_name = dev->device.name;
	struct cdx_map *maps;
	int i;

	/* Get vfio_res */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
		if (strcmp(vfio_res->name, dev_name))
			continue;
		break;
	}

	if (vfio_res == NULL)
		return vfio_res;

	CDX_BUS_INFO("Releasing CDX mapped resource for %s", dev_name);

	maps = vfio_res->maps;
	for (i = 0; i < vfio_res->nb_maps; i++) {
		if (maps[i].addr) {
			CDX_BUS_DEBUG("Calling cdx_unmap_resource for %s at %p",
				dev_name, maps[i].addr);
			cdx_unmap_resource(maps[i].addr, maps[i].size);
		}
	}

	return vfio_res;
}

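/* Unmap a VFIO CDX device in the primary process: disable bus mastering,
 * close the interrupt eventfd, release the VFIO device and drop the saved
 * mapping entry from the tailq.
 */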
static int
cdx_vfio_unmap_resource_primary(struct rte_cdx_device *dev)
{
	struct mapped_cdx_resource *vfio_res = NULL;
	struct mapped_cdx_res_list *vfio_res_list;
	int ret, vfio_dev_fd;

	if (rte_intr_fd_get(dev->intr_handle) >= 0) {
		if (rte_cdx_vfio_bm_disable(dev) < 0)
			CDX_BUS_ERR("Error when disabling bus master for %s",
				    dev->device.name);

		if (close(rte_intr_fd_get(dev->intr_handle)) < 0) {
			CDX_BUS_ERR("Error when closing eventfd file descriptor for %s",
				dev->device.name);
			return -1;
		}
	}

	vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
	if (vfio_dev_fd < 0)
		return -1;

	ret = rte_vfio_release_device(RTE_CDX_BUS_DEVICES_PATH, dev->device.name,
				      vfio_dev_fd);
	if (ret < 0) {
		CDX_BUS_ERR("Cannot release VFIO device");
		return ret;
	}

	vfio_res_list =
		RTE_TAILQ_CAST(cdx_vfio_tailq.head, mapped_cdx_res_list);
	vfio_res = cdx_vfio_find_and_unmap_resource(vfio_res_list, dev);

	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		CDX_BUS_ERR("%s cannot find TAILQ entry for CDX device!",
			dev->device.name);
		return -1;
	}

	TAILQ_REMOVE(vfio_res_list, vfio_res, next);
	rte_free(vfio_res);
	return 0;
}

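/* Unmap a VFIO CDX device in a secondary process: release the VFIO device
 * and unmap its MMIO regions. The tailq entry is owned by the primary
 * process and is left in place.
 */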
static int
cdx_vfio_unmap_resource_secondary(struct rte_cdx_device *dev)
{
	struct mapped_cdx_resource *vfio_res = NULL;
	struct mapped_cdx_res_list *vfio_res_list;
	int ret, vfio_dev_fd;

	vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
	if (vfio_dev_fd < 0)
		return -1;

	ret = rte_vfio_release_device(RTE_CDX_BUS_DEVICES_PATH, dev->device.name,
				      vfio_dev_fd);
	if (ret < 0) {
		CDX_BUS_ERR("Cannot release VFIO device");
		return ret;
	}

	vfio_res_list =
		RTE_TAILQ_CAST(cdx_vfio_tailq.head, mapped_cdx_res_list);
	vfio_res = cdx_vfio_find_and_unmap_resource(vfio_res_list, dev);

	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		CDX_BUS_ERR("%s cannot find TAILQ entry for CDX device!",
			dev->device.name);
		return -1;
	}

	return 0;
}

int
cdx_vfio_unmap_resource(struct rte_cdx_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return cdx_vfio_unmap_resource_primary(dev);
	else
		return cdx_vfio_unmap_resource_secondary(dev);
}

/* Set up interrupt support (but do not enable interrupts). */
static int
cdx_vfio_setup_interrupts(struct rte_cdx_device *dev, int vfio_dev_fd,
		int num_irqs)
{
	int i, ret;

	if (rte_intr_dev_fd_set(dev->intr_handle, vfio_dev_fd))
		return -1;

	if (num_irqs == 0)
		return 0;

	/* use the first IRQ index that can be backed by an eventfd */
	for (i = 0; i < num_irqs; i++) {
		struct vfio_irq_info irq = { .argsz = sizeof(irq) };
		int fd = -1;

		irq.index = i;

		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
		if (ret < 0) {
			CDX_BUS_ERR("Cannot get VFIO IRQ info, error %i (%s)",
				errno, strerror(errno));
			return -1;
		}

		/* skip this IRQ index if it cannot be used with eventfd */
		if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0)
			continue;

		/* Set nb_intr to the total number of interrupts */
		if (rte_intr_event_list_update(dev->intr_handle, irq.count))
			return -1;

		/* set up an eventfd for interrupts */
		fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
		if (fd < 0) {
			CDX_BUS_ERR("Cannot set up eventfd, error %i (%s)",
				errno, strerror(errno));
			return -1;
		}

		if (rte_intr_fd_set(dev->intr_handle, fd))
			return -1;

		/* DPDK CDX bus currently supports only MSI-X */
		if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_VFIO_MSIX))
			return -1;

		return 0;
	}

	/* if we're here, we haven't found a suitable interrupt vector */
	return -1;
}

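/* Set up interrupts, reset the device and enable bus mastering. */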
static int
cdx_vfio_setup_device(struct rte_cdx_device *dev, int vfio_dev_fd,
		int num_irqs)
{
	if (cdx_vfio_setup_interrupts(dev, vfio_dev_fd, num_irqs) != 0) {
		CDX_BUS_ERR("Error setting up interrupts!");
		return -1;
	}

	/*
	 * Reset the device. If the device is not capable of resetting,
	 * the ioctl fails and errno is set to EINVAL.
	 */
	if (ioctl(vfio_dev_fd, VFIO_DEVICE_RESET) && errno != EINVAL) {
		CDX_BUS_ERR("Unable to reset device! Error: %d (%s)", errno,
			strerror(errno));
		return -1;
	}

	/*
	 * Enable bus mastering for the device. errno is set to ENOTTY if
	 * the device does not support configuring bus mastering.
	 */
	if (rte_cdx_vfio_bm_enable(dev) && (errno != ENOTTY)) {
		CDX_BUS_ERR("Bus master enable failure! Error: %d (%s)", errno,
			strerror(errno));
		return -1;
	}

	return 0;
}

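/* mmap a single device MMIO region: first reserve the virtual address range
 * with an anonymous inaccessible mapping, then map the VFIO region on top
 * of it.
 */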
static int
cdx_vfio_mmap_resource(int vfio_dev_fd, struct mapped_cdx_resource *vfio_res,
		int index, int additional_flags)
{
	struct cdx_map *map = &vfio_res->maps[index];
	void *vaddr;

	if (map->size == 0) {
		CDX_BUS_DEBUG("map size is 0, skip region %d", index);
		return 0;
	}

	/* reserve the address using an inaccessible mapping */
	vaddr = mmap(map->addr, map->size, PROT_NONE, MAP_PRIVATE |
		     MAP_ANONYMOUS | additional_flags, -1, 0);
	if (vaddr != MAP_FAILED) {
		void *map_addr;

		/* map the MMIO region on top of the reservation */
		map_addr = cdx_map_resource(vaddr, vfio_dev_fd,
					    map->offset, map->size,
					    RTE_MAP_FORCE_ADDRESS);

		if (map_addr == NULL) {
			munmap(vaddr, map->size);
			CDX_BUS_ERR("Failed to map CDX MMIO region %d", index);
			return -1;
		}
	} else {
		CDX_BUS_ERR("Failed to create inaccessible mapping for MMIO region %d",
			index);
		return -1;
	}

	map->addr = vaddr;
	return 0;
}

/*
 * Region info may contain capability headers, so keep reallocating the
 * buffer until its size matches the argsz reported by the kernel.
 */
static int
cdx_vfio_get_region_info(int vfio_dev_fd, struct vfio_region_info **info,
		int region)
{
	struct vfio_region_info *ri;
	size_t argsz = sizeof(*ri);
	int ret;

	ri = malloc(sizeof(*ri));
	if (ri == NULL) {
		CDX_BUS_ERR("Cannot allocate memory for VFIO region info");
		return -1;
	}
again:
	memset(ri, 0, argsz);
	ri->argsz = argsz;
	ri->index = region;

	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, ri);
	if (ret < 0) {
		free(ri);
		return ret;
	}
	if (ri->argsz != argsz) {
		struct vfio_region_info *tmp;

		argsz = ri->argsz;
		tmp = realloc(ri, argsz);

		if (tmp == NULL) {
			/* realloc failed, the original buffer is still allocated */
			free(ri);
			CDX_BUS_ERR("Cannot reallocate memory for VFIO region info");
			return -1;
		}
		ri = tmp;
		goto again;
	}
	*info = ri;

	return 0;
}

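/* Find the highest end address of all DPDK memory segments, so that device
 * MMIO regions can be mapped above the hugepage memory area.
 */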
static int
find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
	size_t sz = msl->len;
	void *end_va = RTE_PTR_ADD(msl->base_va, sz);
	void **max_va = arg;

	if (*max_va < end_va)
		*max_va = end_va;
	return 0;
}

static void *
cdx_find_max_end_va(void)
{
	void *va = NULL;

	rte_memseg_list_walk(find_max_end_va, &va);
	return va;
}

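/* Map a CDX device in the primary process: open it through VFIO, mmap all
 * mmappable regions, set up interrupts, reset and bus mastering, and record
 * the mappings in the tailq for secondary processes.
 */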
static int
cdx_vfio_map_resource_primary(struct rte_cdx_device *dev)
{
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	static void *cdx_map_addr;
	struct mapped_cdx_resource *vfio_res = NULL;
	struct mapped_cdx_res_list *vfio_res_list =
		RTE_TAILQ_CAST(cdx_vfio_tailq.head, mapped_cdx_res_list);
	const char *dev_name = dev->device.name;
	struct cdx_map *maps;
	int vfio_dev_fd, i, ret;

	if (rte_intr_fd_set(dev->intr_handle, -1))
		return -1;

	ret = rte_vfio_setup_device(RTE_CDX_BUS_DEVICES_PATH, dev_name,
				    &vfio_dev_fd, &device_info);
	if (ret)
		return ret;

	/* allocate vfio_res and get region info */
	vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
	if (vfio_res == NULL) {
		CDX_BUS_ERR("Cannot store VFIO mmap details");
		goto err_vfio_dev_fd;
	}
	memcpy(vfio_res->name, dev_name, RTE_DEV_NAME_MAX_LEN);

	/* get number of regions */
	vfio_res->nb_maps = device_info.num_regions;

	/* map memory regions */
	maps = vfio_res->maps;

	for (i = 0; i < vfio_res->nb_maps; i++) {
		struct vfio_region_info *reg = NULL;
		void *vaddr;

		ret = cdx_vfio_get_region_info(vfio_dev_fd, &reg, i);
		if (ret < 0) {
			CDX_BUS_ERR("%s cannot get device region info, error %i (%s)",
				dev_name, errno, strerror(errno));
			goto err_vfio_res;
		}

		/* skip non-mmappable regions */
		if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {
			free(reg);
			continue;
		}

		/* try mapping somewhere close to the end of hugepages */
		if (cdx_map_addr == NULL)
			cdx_map_addr = cdx_find_max_end_va();

		vaddr = cdx_map_addr;
		cdx_map_addr = RTE_PTR_ADD(vaddr, (size_t)reg->size);

		cdx_map_addr = RTE_PTR_ALIGN(cdx_map_addr,
					     sysconf(_SC_PAGE_SIZE));

		maps[i].addr = vaddr;
		maps[i].offset = reg->offset;
		maps[i].size = reg->size;
		maps[i].path = NULL; /* vfio doesn't have per-resource paths */

		ret = cdx_vfio_mmap_resource(vfio_dev_fd, vfio_res, i, 0);
		if (ret < 0) {
			CDX_BUS_ERR("%s mapping region %i failed: %s",
				dev_name, i, strerror(errno));
			free(reg);
			goto err_vfio_res;
		}

		dev->mem_resource[i].addr = maps[i].addr;
		dev->mem_resource[i].len = maps[i].size;

		free(reg);
	}

	if (cdx_vfio_setup_device(dev, vfio_dev_fd, device_info.num_irqs) < 0) {
		CDX_BUS_ERR("%s setup device failed", dev_name);
		goto err_vfio_res;
	}

	TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);

	return 0;
err_vfio_res:
	cdx_vfio_find_and_unmap_resource(vfio_res_list, dev);
	rte_free(vfio_res);
err_vfio_dev_fd:
	rte_vfio_release_device(RTE_CDX_BUS_DEVICES_PATH, dev_name, vfio_dev_fd);
	return -1;
}

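/* Map a CDX device in a secondary process: look up the mappings recorded by
 * the primary process and re-create them at the same virtual addresses.
 */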
static int
cdx_vfio_map_resource_secondary(struct rte_cdx_device *dev)
{
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	int vfio_dev_fd;
	int i, ret;
	struct mapped_cdx_resource *vfio_res = NULL;
	struct mapped_cdx_res_list *vfio_res_list =
		RTE_TAILQ_CAST(cdx_vfio_tailq.head, mapped_cdx_res_list);
	const char *dev_name = dev->device.name;
	struct cdx_map *maps;

	if (rte_intr_fd_set(dev->intr_handle, -1))
		return -1;

	/* if we're in a secondary process, just find our tailq entry */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
		if (strcmp(vfio_res->name, dev_name))
			continue;
		break;
	}
	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		CDX_BUS_ERR("%s cannot find TAILQ entry for CDX device!",
			dev_name);
		return -1;
	}

	ret = rte_vfio_setup_device(RTE_CDX_BUS_DEVICES_PATH, dev_name,
					&vfio_dev_fd, &device_info);
	if (ret)
		return ret;

	/* map MMIO regions */
	maps = vfio_res->maps;

	for (i = 0; i < vfio_res->nb_maps; i++) {
		ret = cdx_vfio_mmap_resource(vfio_dev_fd, vfio_res, i, MAP_FIXED);
		if (ret < 0) {
			CDX_BUS_ERR("%s mapping MMIO region %i failed: %s",
				dev_name, i, strerror(errno));
			goto err_vfio_dev_fd;
		}

		dev->mem_resource[i].addr = maps[i].addr;
		dev->mem_resource[i].len = maps[i].size;
	}

	/* save vfio_dev_fd so it can be used during release */
	if (rte_intr_dev_fd_set(dev->intr_handle, vfio_dev_fd))
		goto err_vfio_dev_fd;

	return 0;
err_vfio_dev_fd:
	rte_vfio_release_device(RTE_CDX_BUS_DEVICES_PATH, dev_name, vfio_dev_fd);
	return -1;
}

/*
 * Map the CDX resources of a CDX device into virtual memory (VFIO version).
 * Primary and secondary processes follow almost exactly the same path.
 */
int
cdx_vfio_map_resource(struct rte_cdx_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return cdx_vfio_map_resource_primary(dev);
	else
		return cdx_vfio_map_resource_secondary(dev);
}

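/* Enable MSI interrupts */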
int
rte_cdx_vfio_intr_enable(const struct rte_intr_handle *intr_handle)
{
	char irq_set_buf[MSI_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr, vfio_dev_fd, i;
	int ret;

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->count = rte_intr_nb_intr_get(intr_handle);
	irq_set->argsz = sizeof(struct vfio_irq_set) +
			 (sizeof(int) * irq_set->count);

	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = 0;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;

	for (i = 0; i < rte_intr_nb_efd_get(intr_handle); i++)
		fd_ptr[i] = rte_intr_efds_index_get(intr_handle, i);

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		CDX_BUS_ERR("Error enabling MSI interrupts for fd %d",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	return 0;
}

/* Disable MSI interrupts */
int
rte_cdx_vfio_intr_disable(const struct rte_intr_handle *intr_handle)
{
	struct vfio_irq_set *irq_set;
	char irq_set_buf[MSI_IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = 0;
	irq_set->start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret)
		CDX_BUS_ERR("Error disabling MSI interrupts for fd %d",
			rte_intr_fd_get(intr_handle));

	return ret;
}

/* Enable Bus Mastering */
int
rte_cdx_vfio_bm_enable(struct rte_cdx_device *dev)
{
	struct vfio_device_feature_bus_master *vfio_bm_feature;
	struct vfio_device_feature *feature;
	int vfio_dev_fd, ret;
	size_t argsz;

	vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
	if (vfio_dev_fd < 0)
		return -1;

	argsz = sizeof(struct vfio_device_feature) + sizeof(struct vfio_device_feature_bus_master);

	feature = (struct vfio_device_feature *)malloc(argsz);
	if (!feature)
		return -ENOMEM;

	vfio_bm_feature = (struct vfio_device_feature_bus_master *) feature->data;

	feature->argsz = argsz;

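	/* probe whether the device supports the bus-master feature */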
	feature->flags = RTE_VFIO_DEVICE_FEATURE_BUS_MASTER | VFIO_DEVICE_FEATURE_PROBE;
	feature->flags |= VFIO_DEVICE_FEATURE_SET;
	ret = ioctl(vfio_dev_fd, RTE_VFIO_DEVICE_FEATURE, feature);
	if (ret) {
		CDX_BUS_ERR("Bus master configuration not supported for device: %s, Error: %d (%s)",
			dev->name, errno, strerror(errno));
		free(feature);
		return ret;
	}

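	/* probe succeeded, now actually enable bus mastering */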
	feature->flags = RTE_VFIO_DEVICE_FEATURE_BUS_MASTER | VFIO_DEVICE_FEATURE_SET;
	vfio_bm_feature->op = VFIO_DEVICE_FEATURE_SET_MASTER;
	ret = ioctl(vfio_dev_fd, RTE_VFIO_DEVICE_FEATURE, feature);
	if (ret < 0)
		CDX_BUS_ERR("Bus master enable failed for device: %s, Error: %d (%s)",
			dev->name, errno, strerror(errno));

	free(feature);
	return ret;
}

/* Disable Bus Mastering */
int
rte_cdx_vfio_bm_disable(struct rte_cdx_device *dev)
{
	struct vfio_device_feature_bus_master *vfio_bm_feature;
	struct vfio_device_feature *feature;
	int vfio_dev_fd, ret;
	size_t argsz;

	vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
	if (vfio_dev_fd < 0)
		return -1;

	argsz = sizeof(struct vfio_device_feature) + sizeof(struct vfio_device_feature_bus_master);

	feature = (struct vfio_device_feature *)malloc(argsz);
	if (!feature)
		return -ENOMEM;

	vfio_bm_feature = (struct vfio_device_feature_bus_master *) feature->data;

	feature->argsz = argsz;

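	/* probe whether the device supports the bus-master feature */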
	feature->flags = RTE_VFIO_DEVICE_FEATURE_BUS_MASTER | VFIO_DEVICE_FEATURE_PROBE;
	feature->flags |= VFIO_DEVICE_FEATURE_SET;
	ret = ioctl(vfio_dev_fd, RTE_VFIO_DEVICE_FEATURE, feature);
	if (ret) {
		CDX_BUS_ERR("Bus master configuration not supported for device: %s, Error: %d (%s)",
			dev->name, errno, strerror(errno));
		free(feature);
		return ret;
	}

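	/* probe succeeded, now actually disable bus mastering */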
	feature->flags = RTE_VFIO_DEVICE_FEATURE_BUS_MASTER | VFIO_DEVICE_FEATURE_SET;
	vfio_bm_feature->op = VFIO_DEVICE_FEATURE_CLEAR_MASTER;
	ret = ioctl(vfio_dev_fd, RTE_VFIO_DEVICE_FEATURE, feature);
	if (ret < 0)
		CDX_BUS_ERR("Bus master disable failed for device: %s, Error: %d (%s)",
			dev->name, errno, strerror(errno));

	free(feature);
	return ret;
}