xref: /dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c (revision 4d4399ae859fbb0b1a4390fb8efb7d79a791a4ad)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2017, 2020, 2023 NXP
4  *
5  */
6 
7 #include <unistd.h>
8 #include <stdio.h>
9 #include <sys/types.h>
10 #include <string.h>
11 #include <stdlib.h>
12 #include <fcntl.h>
13 #include <errno.h>
14 
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <dev_driver.h>
21 #include <ethdev_driver.h>
22 
23 #include <fslmc_logs.h>
24 #include <bus_fslmc_driver.h>
25 #include <mc/fsl_dpci.h>
26 #include "portal/dpaa2_hw_pvt.h"
27 #include "portal/dpaa2_hw_dpio.h"
28 
29 TAILQ_HEAD(dpci_dev_list, dpaa2_dpci_dev);
30 static struct dpci_dev_list dpci_dev_list
31 	= TAILQ_HEAD_INITIALIZER(dpci_dev_list); /*!< DPCI device list */
32 
33 static struct dpaa2_dpci_dev *get_dpci_from_id(uint32_t dpci_id)
34 {
35 	struct dpaa2_dpci_dev *dpci_dev = NULL;
36 
37 	/* Get DPCI dev handle from list using index */
38 	TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
39 		if (dpci_dev->dpci_id == dpci_id)
40 			break;
41 	}
42 
43 	return dpci_dev;
44 }
45 
46 static int
47 rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
48 	struct vfio_device_info *obj_info __rte_unused,
49 	struct rte_dpaa2_device *obj)
50 {
51 	struct dpaa2_dpci_dev *dpci_node;
52 	struct dpci_attr attr;
53 	struct dpci_rx_queue_cfg rx_queue_cfg;
54 	struct dpci_rx_queue_attr rx_attr;
55 	struct dpci_tx_queue_attr tx_attr;
56 	int ret, i, dpci_id = obj->object_id;
57 
58 	/* Allocate DPAA2 dpci handle */
59 	dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0);
60 	if (!dpci_node) {
61 		DPAA2_BUS_ERR("Memory allocation failed for DPCI Device");
62 		return -ENOMEM;
63 	}
64 
65 	/* Open the dpci object */
66 	dpci_node->dpci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
67 	ret = dpci_open(&dpci_node->dpci,
68 			CMD_PRI_LOW, dpci_id, &dpci_node->token);
69 	if (ret) {
70 		DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret);
71 		goto err;
72 	}
73 
74 	/* Get the device attributes */
75 	ret = dpci_get_attributes(&dpci_node->dpci,
76 				  CMD_PRI_LOW, dpci_node->token, &attr);
77 	if (ret != 0) {
78 		DPAA2_BUS_ERR("Reading device failed with err code: %d", ret);
79 		goto err;
80 	}
81 
82 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
83 		struct dpaa2_queue *rxq;
84 
85 		memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
86 		ret = dpci_set_rx_queue(&dpci_node->dpci,
87 					CMD_PRI_LOW,
88 					dpci_node->token,
89 					i, &rx_queue_cfg);
90 		if (ret) {
91 			DPAA2_BUS_ERR("Setting Rx queue failed with err code: %d",
92 				      ret);
93 			goto err;
94 		}
95 
96 		/* Allocate DQ storage for the DPCI Rx queues */
97 		rxq = &dpci_node->rx_queue[i];
98 		ret = dpaa2_queue_storage_alloc(rxq, 1);
99 		if (ret)
100 			goto err;
101 	}
102 
103 	/* Enable the device */
104 	ret = dpci_enable(&dpci_node->dpci,
105 			  CMD_PRI_LOW, dpci_node->token);
106 	if (ret != 0) {
107 		DPAA2_BUS_ERR("Enabling device failed with err code: %d", ret);
108 		goto err;
109 	}
110 
111 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
112 		/* Get the Rx FQID's */
113 		ret = dpci_get_rx_queue(&dpci_node->dpci,
114 					CMD_PRI_LOW,
115 					dpci_node->token, i,
116 					&rx_attr);
117 		if (ret != 0) {
118 			DPAA2_BUS_ERR("Rx queue fetch failed with err code: %d",
119 				      ret);
120 			goto err;
121 		}
122 		dpci_node->rx_queue[i].fqid = rx_attr.fqid;
123 
124 		ret = dpci_get_tx_queue(&dpci_node->dpci,
125 					CMD_PRI_LOW,
126 					dpci_node->token, i,
127 					&tx_attr);
128 		if (ret != 0) {
129 			DPAA2_BUS_ERR("Reading device failed with err code: %d",
130 				      ret);
131 			goto err;
132 		}
133 		dpci_node->tx_queue[i].fqid = tx_attr.fqid;
134 	}
135 
136 	dpci_node->dpci_id = dpci_id;
137 	rte_atomic16_init(&dpci_node->in_use);
138 
139 	TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next);
140 
141 	return 0;
142 
143 err:
144 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
145 		struct dpaa2_queue *rxq = &dpci_node->rx_queue[i];
146 
147 		dpaa2_queue_storage_free(rxq, 1);
148 	}
149 	rte_free(dpci_node);
150 
151 	return ret;
152 }
153 
154 struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void)
155 {
156 	struct dpaa2_dpci_dev *dpci_dev = NULL;
157 
158 	/* Get DPCI dev handle from list using index */
159 	TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
160 		if (dpci_dev && rte_atomic16_test_and_set(&dpci_dev->in_use))
161 			break;
162 	}
163 
164 	return dpci_dev;
165 }
166 
167 void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci)
168 {
169 	struct dpaa2_dpci_dev *dpci_dev = NULL;
170 
171 	/* Match DPCI handle and mark it free */
172 	TAILQ_FOREACH(dpci_dev, &dpci_dev_list, next) {
173 		if (dpci_dev == dpci) {
174 			rte_atomic16_dec(&dpci_dev->in_use);
175 			return;
176 		}
177 	}
178 }
179 
180 
181 static void
182 rte_dpaa2_close_dpci_device(int object_id)
183 {
184 	struct dpaa2_dpci_dev *dpci_dev = NULL;
185 
186 	dpci_dev = get_dpci_from_id((uint32_t)object_id);
187 
188 	if (dpci_dev) {
189 		rte_dpaa2_free_dpci_dev(dpci_dev);
190 		dpci_close(&dpci_dev->dpci, CMD_PRI_LOW, dpci_dev->token);
191 		TAILQ_REMOVE(&dpci_dev_list, dpci_dev, next);
192 		rte_free(dpci_dev);
193 	}
194 }
195 
/* Bus object descriptor: binds the DPAA2_CI (DPCI) object type to its
 * create/close callbacks so the fslmc bus invokes them during scan
 * and teardown.
 */
static struct rte_dpaa2_object rte_dpaa2_dpci_obj = {
	.dev_type = DPAA2_CI,
	.create = rte_dpaa2_create_dpci_device,
	.close = rte_dpaa2_close_dpci_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpci, rte_dpaa2_dpci_obj);
203