/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include <bus_pci_driver.h>
#include <rte_devargs.h>
#include <rte_dmadev_pmd.h>
#include <rte_malloc.h>

#include "idxd_internal.h"

#define IDXD_VENDOR_ID		0x8086
#define IDXD_DEVICE_ID_SPR	0x0B25

#define DEVICE_VERSION_1	0x100
#define DEVICE_VERSION_2	0x200
/*
 * Set bits for Traffic Class A & B
 * TC-A (Bits 2:0) and TC-B (Bits 5:3)
 */
#define IDXD_SET_TC_A_B		0x9

#define IDXD_PMD_DMADEV_NAME_PCI dmadev_idxd_pci

const struct rte_pci_id pci_id_idxd_map[] = {
	{ RTE_PCI_DEVICE(IDXD_VENDOR_ID, IDXD_DEVICE_ID_SPR) },
	{ .vendor_id = 0, /* sentinel */ },
};

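/*
 * Issue a command to the device via the cmd register and busy-poll the
 * cmdstatus register until the hardware clears the active bit. Commands
 * that operate on a single work queue (disable/drain/reset WQ) take a
 * WQ bitmask in the operand field rather than a queue index. Returns the
 * masked error code from cmdstatus, or whatever error code was last read
 * if we give up after ~1000 polls.
 */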
static inline int
idxd_pci_dev_command(struct idxd_dmadev *idxd, enum rte_idxd_cmds command)
{
	uint32_t err_code;
	uint16_t qid = idxd->qid;
	int i = 0;

	if (command >= idxd_disable_wq && command <= idxd_reset_wq)
		qid = (1 << qid);
	rte_spinlock_lock(&idxd->u.pci->lk);
	idxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;

	do {
		rte_pause();
		err_code = idxd->u.pci->regs->cmdstatus;
		if (++i >= 1000) {
			IDXD_PMD_ERR("Timeout waiting for command response from HW");
			rte_spinlock_unlock(&idxd->u.pci->lk);
			err_code &= CMDSTATUS_ERR_MASK;
			return err_code;
		}
	} while (err_code & CMDSTATUS_ACTIVE_MASK);
	rte_spinlock_unlock(&idxd->u.pci->lk);

	err_code &= CMDSTATUS_ERR_MASK;
	return err_code;
}

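/*
 * Return a pointer to the config registers for the given work queue.
 * The WQ config blocks are laid out contiguously from wq_regs_base with
 * a stride of 1 << (5 + wq_cfg_sz) bytes, i.e. a power-of-two multiple
 * of 32 bytes as reported by the WQCAP register.
 */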
static uint32_t *
idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
{
	return RTE_PTR_ADD(pci->wq_regs_base,
			(uintptr_t)wq_idx << (5 + pci->wq_cfg_sz));
}

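/* Check the WQ state field of the queue's config registers: 0x1 == enabled. */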
static int
idxd_is_wq_enabled(struct idxd_dmadev *idxd)
{
	uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[wq_state_idx];
	return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}

static int
idxd_pci_dev_stop(struct rte_dma_dev *dev)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	uint8_t err_code;

	if (!idxd_is_wq_enabled(idxd)) {
		IDXD_PMD_ERR("Work queue %d already disabled", idxd->qid);
		return 0;
	}

	err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
	if (err_code || idxd_is_wq_enabled(idxd)) {
		IDXD_PMD_ERR("Failed disabling work queue %d, error code: %#x",
				idxd->qid, err_code);
		return err_code == 0 ? -1 : -err_code;
	}
	IDXD_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);

	return 0;
}

static int
idxd_pci_dev_start(struct rte_dma_dev *dev)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	uint8_t err_code;

	if (idxd_is_wq_enabled(idxd)) {
		IDXD_PMD_WARN("WQ %d already enabled", idxd->qid);
		return 0;
	}

	if (idxd->desc_ring == NULL) {
		IDXD_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
		return -EINVAL;
	}

	err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
	if (err_code || !idxd_is_wq_enabled(idxd)) {
		IDXD_PMD_ERR("Failed enabling work queue %d, error code: %#x",
				idxd->qid, err_code);
		return err_code == 0 ? -1 : -err_code;
	}
	IDXD_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);

	return 0;
}

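/*
 * Close one work-queue device: disable the WQ if needed and free its
 * descriptor and completion rings. The PCI struct is shared between all
 * WQs on the device, so it is only disabled and freed once the last WQ
 * reference is dropped.
 */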
static int
idxd_pci_dev_close(struct rte_dma_dev *dev)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	uint8_t err_code;
	int is_last_wq;

	if (idxd_is_wq_enabled(idxd)) {
		/* disable the wq */
		err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
		if (err_code) {
			IDXD_PMD_ERR("Error disabling wq: code %#x", err_code);
			return err_code;
		}
		IDXD_PMD_DEBUG("IDXD WQ disabled OK");
	}

	/* free device memory */
	IDXD_PMD_DEBUG("Freeing device driver memory");
	rte_free(idxd->batch_comp_ring);
	rte_free(idxd->desc_ring);

	/* if this is the last WQ on the device, disable the device and free
	 * the PCI struct
	 */
	/* NOTE: review for potential ordering optimization */
	is_last_wq = (rte_atomic_fetch_sub_explicit(&idxd->u.pci->ref_count, 1,
			rte_memory_order_seq_cst) == 1);
	if (is_last_wq) {
		/* disable the device */
		err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
		if (err_code) {
			IDXD_PMD_ERR("Error disabling device: code %#x", err_code);
			return err_code;
		}
		IDXD_PMD_DEBUG("IDXD device disabled OK");
		rte_free(idxd->u.pci);
	}

	return 0;
}

static const struct rte_dma_dev_ops idxd_pci_ops = {
	.dev_close = idxd_pci_dev_close,
	.dev_dump = idxd_dump,
	.dev_configure = idxd_configure,
	.vchan_setup = idxd_vchan_setup,
	.dev_info_get = idxd_info_get,
	.stats_get = idxd_stats_get,
	.stats_reset = idxd_stats_reset,
	.dev_start = idxd_pci_dev_start,
	.dev_stop = idxd_pci_dev_stop,
	.vchan_status = idxd_vchan_status,
};

/* each portal uses 4 x 4k pages */
#define IDXD_PORTAL_SIZE (4096 * 4)

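/*
 * One-time hardware setup: map the BAR0 register blocks, reset the
 * device, sanity-check its state, then divide the available groups,
 * engines and work queues (each engine in its own group to avoid
 * reordering) before enabling the device. Returns the number of WQs
 * configured, or a negative value on error.
 */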
static int
init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
		unsigned int max_queues)
{
	struct idxd_pci_common *pci;
	uint8_t nb_groups, nb_engines, nb_wqs;
	uint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */
	uint16_t wq_size, total_wq_size;
	uint8_t lg2_max_batch, lg2_max_copy_size;
	uint32_t version;
	unsigned int i, err_code;

	pci = rte_malloc(NULL, sizeof(*pci), 0);
	if (pci == NULL) {
		IDXD_PMD_ERR("%s: Can't allocate memory", __func__);
		err_code = -1;
		goto err;
	}
	memset(pci, 0, sizeof(*pci));
	rte_spinlock_init(&pci->lk);

	/* assign the bar registers, and then configure device */
	pci->regs = dev->mem_resource[0].addr;
	version = pci->regs->version;
	grp_offset = (uint16_t)pci->regs->offsets[0];
	pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
	wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
	pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
	pci->portals = dev->mem_resource[2].addr;
	pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;

	/* reset */
	idxd->u.pci = pci;
	err_code = idxd_pci_dev_command(idxd, idxd_reset_device);
	if (err_code) {
		IDXD_PMD_ERR("Error resetting device: code %#x", err_code);
		err_code = -1;
		goto err;
	}

	/* sanity check device status */
	if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
		/* need function-level-reset (FLR) or is enabled */
		IDXD_PMD_ERR("Device status is not disabled, cannot init");
		err_code = -1;
		goto err;
	}
	if (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {
		/* command in progress */
		IDXD_PMD_ERR("Device has a command in progress, cannot init");
		err_code = -1;
		goto err;
	}

	/* read basic info about the hardware for use when configuring */
	nb_groups = (uint8_t)pci->regs->grpcap;
	nb_engines = (uint8_t)pci->regs->engcap;
	nb_wqs = (uint8_t)(pci->regs->wqcap >> 16);
	total_wq_size = (uint16_t)pci->regs->wqcap;
	lg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;
	lg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;

	IDXD_PMD_DEBUG("nb_groups = %u, nb_engines = %u, nb_wqs = %u",
			nb_groups, nb_engines, nb_wqs);

	/* zero out any old config */
	for (i = 0; i < nb_groups; i++) {
		pci->grp_regs[i].grpengcfg = 0;
		pci->grp_regs[i].grpwqcfg[0] = 0;
		if (version <= DEVICE_VERSION_2)
			pci->grp_regs[i].grpflags |= IDXD_SET_TC_A_B;
	}
	for (i = 0; i < nb_wqs; i++)
		idxd_get_wq_cfg(pci, i)[0] = 0;

	/* limit queues if necessary */
	if (max_queues != 0 && nb_wqs > max_queues) {
		nb_wqs = max_queues;
		if (nb_engines > max_queues)
			nb_engines = max_queues;
		if (nb_groups > max_queues)
			nb_groups = max_queues;
		IDXD_PMD_DEBUG("Limiting queues to %u", nb_wqs);
	}

	/* put each engine into a separate group to avoid reordering */
	if (nb_groups > nb_engines)
		nb_groups = nb_engines;
	if (nb_groups < nb_engines)
		nb_engines = nb_groups;

	/* assign engines to groups, round-robin style */
	for (i = 0; i < nb_engines; i++) {
		IDXD_PMD_DEBUG("Assigning engine %u to group %u",
				i, i % nb_groups);
		pci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);
	}

	/* now do the same for queues and give work slots to each queue */
	wq_size = total_wq_size / nb_wqs;
	IDXD_PMD_DEBUG("Work queue size = %u, max batch = 2^%u, max copy = 2^%u",
			wq_size, lg2_max_batch, lg2_max_copy_size);
	for (i = 0; i < nb_wqs; i++) {
		/* add work queue "i" to a group */
		IDXD_PMD_DEBUG("Assigning work queue %u to group %u",
				i, i % nb_groups);
		pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
		/* now configure it, in terms of size, max batch, mode */
		idxd_get_wq_cfg(pci, i)[wq_size_idx] = wq_size;
		idxd_get_wq_cfg(pci, i)[wq_mode_idx] = (1 << WQ_PRIORITY_SHIFT) |
				WQ_MODE_DEDICATED;
		idxd_get_wq_cfg(pci, i)[wq_sizes_idx] = lg2_max_copy_size |
				(lg2_max_batch << WQ_BATCH_SZ_SHIFT);
	}

	IDXD_PMD_DEBUG("    Device Version: %"PRIx32, version);
	/* dump the group configuration to output */
	for (i = 0; i < nb_groups; i++) {
		IDXD_PMD_DEBUG("## Group %d", i);
		IDXD_PMD_DEBUG("    GRPWQCFG: %"PRIx64, pci->grp_regs[i].grpwqcfg[0]);
		IDXD_PMD_DEBUG("    GRPENGCFG: %"PRIx64, pci->grp_regs[i].grpengcfg);
		IDXD_PMD_DEBUG("    GRPFLAGS: %"PRIx32, pci->grp_regs[i].grpflags);
	}

	idxd->u.pci = pci;
	idxd->max_batches = wq_size;
	idxd->max_batch_size = 1 << lg2_max_batch;

	/* enable the device itself */
	err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
	if (err_code) {
		IDXD_PMD_ERR("Error enabling device: code %#x", err_code);
		err_code = -1;
		goto err;
	}
	IDXD_PMD_DEBUG("IDXD Device enabled OK");

	return nb_wqs;

err:
	rte_free(pci);
	return err_code;
}

static int
idxd_dmadev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
	struct idxd_dmadev idxd = {0};
	uint8_t nb_wqs;
	int qid, ret = 0;
	char name[PCI_PRI_STR_SIZE];
	unsigned int max_queues = 0;

	rte_pci_device_name(&dev->addr, name, sizeof(name));
	IDXD_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
	dev->device.driver = &drv->driver;

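	/*
	 * In a secondary process the hardware is already configured, so just
	 * attach to the dmadevs created by the primary process. Queue 0 is
	 * looked up first to reach the shared PCI struct, whose ref_count
	 * gives the number of queues to attach to.
	 */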
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		char qname[32];
		int max_qid;

		/* look up queue 0 to get the PCI structure */
		snprintf(qname, sizeof(qname), "%s-q0", name);
		IDXD_PMD_INFO("Looking up %s", qname);
		ret = idxd_dmadev_create(qname, &dev->device, NULL, &idxd_pci_ops);
		if (ret != 0) {
			IDXD_PMD_ERR("Failed to create dmadev %s", name);
			return ret;
		}
		qid = rte_dma_get_dev_id_by_name(qname);
		max_qid = rte_atomic_load_explicit(
			&((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count,
			rte_memory_order_seq_cst);

		/* we have queue 0 done, now configure the rest of the queues */
		for (qid = 1; qid < max_qid; qid++) {
			/* add the queue number to each device name */
			snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
			IDXD_PMD_INFO("Looking up %s", qname);
			ret = idxd_dmadev_create(qname, &dev->device, NULL, &idxd_pci_ops);
			if (ret != 0) {
				IDXD_PMD_ERR("Failed to create dmadev %s", name);
				return ret;
			}
		}
		return 0;
	}

	if (dev->device.devargs && dev->device.devargs->args[0] != '\0') {
		/* if the number of devargs grows beyond just 1, use rte_kvargs */
		if (sscanf(dev->device.devargs->args,
				"max_queues=%u", &max_queues) != 1) {
			IDXD_PMD_ERR("Invalid device parameter: '%s'",
					dev->device.devargs->args);
			return -1;
		}
	}

	ret = init_pci_device(dev, &idxd, max_queues);
	if (ret < 0) {
		IDXD_PMD_ERR("Error initializing PCI hardware");
		return ret;
	}
	if (idxd.u.pci->portals == NULL) {
		IDXD_PMD_ERR("Error, invalid portal assigned during initialization");
		rte_free(idxd.u.pci);
		return -EINVAL;
	}
	nb_wqs = (uint8_t)ret;

	/* set up one device for each queue */
	for (qid = 0; qid < nb_wqs; qid++) {
		char qname[32];

		/* add the queue number to each device name */
		snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
		idxd.qid = qid;
		idxd.portal = RTE_PTR_ADD(idxd.u.pci->portals,
				qid * IDXD_PORTAL_SIZE);
		if (idxd_is_wq_enabled(&idxd))
			IDXD_PMD_ERR("Error, WQ %u seems enabled", qid);
		ret = idxd_dmadev_create(qname, &dev->device,
				&idxd, &idxd_pci_ops);
		if (ret != 0) {
			IDXD_PMD_ERR("Failed to create dmadev %s", name);
			if (qid == 0) /* if no devices using this, free pci */
				rte_free(idxd.u.pci);
			return ret;
		}
		rte_atomic_fetch_add_explicit(&idxd.u.pci->ref_count, 1, rte_memory_order_seq_cst);
	}

	return 0;
}

static int
idxd_dmadev_destroy(const char *name)
{
	int ret = 0;

	/* rte_dma_close is called by pmd_release */
	ret = rte_dma_pmd_release(name);
	if (ret)
		IDXD_PMD_DEBUG("Device cleanup failed");

	return ret;
}

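/*
 * Remove all dmadevs belonging to this PCI device: each WQ device was
 * created with the PCI device name as a prefix, so match on that prefix
 * and destroy every dmadev found.
 */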
static int
idxd_dmadev_remove_pci(struct rte_pci_device *dev)
{
	int i = 0;
	char name[PCI_PRI_STR_SIZE];

	rte_pci_device_name(&dev->addr, name, sizeof(name));

	IDXD_PMD_INFO("Closing %s on NUMA node %d", name, dev->device.numa_node);

	RTE_DMA_FOREACH_DEV(i) {
		struct rte_dma_info info;
		rte_dma_info_get(i, &info);
		if (strncmp(name, info.dev_name, strlen(name)) == 0)
			idxd_dmadev_destroy(info.dev_name);
	}

	return 0;
}

struct rte_pci_driver idxd_pmd_drv_pci = {
	.id_table = pci_id_idxd_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = idxd_dmadev_probe_pci,
	.remove = idxd_dmadev_remove_pci,
};

RTE_PMD_REGISTER_PCI(IDXD_PMD_DMADEV_NAME_PCI, idxd_pmd_drv_pci);
RTE_PMD_REGISTER_PCI_TABLE(IDXD_PMD_DMADEV_NAME_PCI, pci_id_idxd_map);
RTE_PMD_REGISTER_KMOD_DEP(IDXD_PMD_DMADEV_NAME_PCI, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(dmadev_idxd_pci, "max_queues=0");
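/*
 * Usage sketch (the PCI address below is a placeholder): bind the device
 * to vfio-pci, then pass the max_queues devarg via the EAL allow list to
 * limit how many work queues are exposed as dmadevs, e.g.
 *   <dpdk-app> -a <pci_addr>,max_queues=4
 */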