/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#include <rte_bus_vdev.h>
#include <rte_atomic.h>
#include <rte_interrupts.h>
#include <rte_branch_prediction.h>
#include <rte_lcore.h>

#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_cmdif_logs.h"
#include "rte_pmd_dpaa2_cmdif.h"

/* CMDIF driver name */
#define DPAA2_CMDIF_PMD_NAME dpaa2_dpci

/*
 * This API provides the DPCI device ID in 'attr_value'.
 * The device ID is to be passed by the GPP to the AIOP using CMDIF commands.
 */
static int
dpaa2_cmdif_get_attr(struct rte_rawdev *dev,
		     const char *attr_name,
		     uint64_t *attr_value)
{
	struct dpaa2_dpci_dev *cidev = dev->dev_private;

	DPAA2_CMDIF_FUNC_TRACE();

	RTE_SET_USED(attr_name);

	if (!attr_value) {
		DPAA2_CMDIF_ERR("Invalid arguments for getting attributes");
		return -EINVAL;
	}
	*attr_value = cidev->dpci_id;

	return 0;
}

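/*
 * Usage sketch (illustrative only, not part of the driver): the DPCI ID can
 * be fetched through the generic rawdev attribute API. 'dev_id' stands for
 * the application's rawdev identifier; the attribute name is ignored by this
 * driver, so any string works.
 *
 *	uint64_t dpci_id;
 *
 *	if (rte_rawdev_get_attr(dev_id, "dpci_id", &dpci_id) == 0)
 *		printf("DPCI device ID: %" PRIu64 "\n", dpci_id);
 */

/*
 * Enqueue a single buffer towards the AIOP. The Tx queue is selected by the
 * 'priority' field of the rte_dpaa2_cmdif_context passed in 'context'; only
 * one buffer is transmitted per call and 'count' is unused.
 */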
static int
dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
			 struct rte_rawdev_buf **buffers,
			 unsigned int count,
			 rte_rawdev_obj_t context)
{
	struct dpaa2_dpci_dev *cidev = dev->dev_private;
	struct rte_dpaa2_cmdif_context *cmdif_send_cnxt;
	struct dpaa2_queue *txq;
	struct qbman_fd fd;
	struct qbman_eq_desc eqdesc;
	struct qbman_swp *swp;
	uint32_t retry_count = 0;
	int ret;

	RTE_SET_USED(count);

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_CMDIF_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	cmdif_send_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
	txq = &(cidev->tx_queue[cmdif_send_cnxt->priority]);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	/* Set some of the FD parameters to 0.
	 * For performance reasons, do not memset the entire FD.
	 */
	fd.simple.bpid_offset = 0;
	fd.simple.ctrl = 0;

	DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr));
	DPAA2_SET_FD_LEN(&fd, cmdif_send_cnxt->size);
	DPAA2_SET_FD_FRC(&fd, cmdif_send_cnxt->frc);
	DPAA2_SET_FD_FLC(&fd, cmdif_send_cnxt->flc);

	/* Enqueue a packet to the QBMAN */
	do {
		ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
		if (ret < 0 && ret != -EBUSY)
			DPAA2_CMDIF_ERR("Transmit failure with err: %d\n", ret);
		retry_count++;
	} while ((ret == -EBUSY) && (retry_count < DPAA2_MAX_TX_RETRY_COUNT));

	if (ret < 0)
		return ret;

	DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n");

	return 1;
}

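/*
 * Usage sketch (illustrative only): sending one command buffer. 'dev_id',
 * 'cmd_data' and 'len' are application-provided; the context layout comes
 * from rte_pmd_dpaa2_cmdif.h.
 *
 *	struct rte_dpaa2_cmdif_context cnxt = {
 *		.priority = 0, .size = len, .frc = frc, .flc = flc,
 *	};
 *	struct rte_rawdev_buf buf = { .buf_addr = cmd_data };
 *	struct rte_rawdev_buf *bufs[1] = { &buf };
 *
 *	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, &cnxt);
 */

/*
 * Dequeue a single frame from the AIOP using a volatile (pull mode) dequeue.
 * The Rx queue is selected by the 'priority' field of the context; at most
 * one buffer is returned per call and 'count' is unused.
 */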
static int
dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
			 struct rte_rawdev_buf **buffers,
			 unsigned int count,
			 rte_rawdev_obj_t context)
{
	struct dpaa2_dpci_dev *cidev = dev->dev_private;
	struct rte_dpaa2_cmdif_context *cmdif_rcv_cnxt;
	struct dpaa2_queue *rxq;
	struct qbman_swp *swp;
	struct qbman_result *dq_storage;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	uint8_t status;
	int ret;

	RTE_SET_USED(count);

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_CMDIF_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
	dq_storage = rxq->q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
	qbman_pull_desc_set_numframes(&pulldesc, 1);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Check if the previously issued command is completed. */
	while (!qbman_check_command_complete(dq_storage))
		;
	/* Loop until dq_storage is updated with a new token by QBMAN */
	while (!qbman_result_has_new_result(swp, dq_storage))
		;

	/* Check for a valid frame. */
	status = (uint8_t)qbman_result_DQ_flags(dq_storage);
	if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
		DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n");
		return 0;
	}

	fd = qbman_result_DQ_fd(dq_storage);

	buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FD_ADDR(fd) + DPAA2_GET_FD_OFFSET(fd));
	cmdif_rcv_cnxt->size = DPAA2_GET_FD_LEN(fd);
	cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd);
	cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd);

	DPAA2_CMDIF_DP_DEBUG("packet received\n");

	return 1;
}

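/*
 * Usage sketch (illustrative only): receiving one response frame. On success
 * the driver fills in buf_addr and the size/frc/flc fields of the context.
 *
 *	struct rte_dpaa2_cmdif_context cnxt = { .priority = 0 };
 *	struct rte_rawdev_buf buf;
 *	struct rte_rawdev_buf *bufs[1] = { &buf };
 *
 *	if (rte_rawdev_dequeue_buffers(dev_id, bufs, 1, &cnxt) == 1)
 *		// process cnxt.size bytes at buf.buf_addr
 */

/* Only attribute query and buffer enqueue/dequeue are implemented; the
 * remaining rawdev ops are left unset.
 */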
static const struct rte_rawdev_ops dpaa2_cmdif_ops = {
	.attr_get = dpaa2_cmdif_get_attr,
	.enqueue_bufs = dpaa2_cmdif_enqueue_bufs,
	.dequeue_bufs = dpaa2_cmdif_dequeue_bufs,
};

static int
dpaa2_cmdif_create(const char *name,
		   struct rte_vdev_device *vdev,
		   int socket_id)
{
	struct rte_rawdev *rawdev;
	struct dpaa2_dpci_dev *cidev;

	/* Allocate device structure */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct dpaa2_dpci_dev),
					 socket_id);
	if (!rawdev) {
		DPAA2_CMDIF_ERR("Unable to allocate raw device");
		return -EINVAL;
	}

	rawdev->dev_ops = &dpaa2_cmdif_ops;
	rawdev->device = &vdev->device;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cidev = rte_dpaa2_alloc_dpci_dev();
	if (!cidev) {
		DPAA2_CMDIF_ERR("Unable to allocate CI device");
		rte_rawdev_pmd_release(rawdev);
		return -ENODEV;
	}

	rawdev->dev_private = cidev;

	return 0;
}

static int
dpaa2_cmdif_destroy(const char *name)
{
	int ret;
	struct rte_rawdev *rdev;

	rdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rdev) {
		DPAA2_CMDIF_ERR("Invalid device name (%s)", name);
		return -EINVAL;
	}

	/* Only the primary process frees the DPCI device */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_dpaa2_free_dpci_dev(rdev->dev_private);

	ret = rte_rawdev_pmd_release(rdev);
	if (ret)
		DPAA2_CMDIF_DEBUG("Device cleanup failed");

	return 0;
}

static int
dpaa2_cmdif_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	int ret = 0;

	name = rte_vdev_device_name(vdev);

	DPAA2_CMDIF_INFO("Init %s on NUMA node %d", name, rte_socket_id());

	ret = dpaa2_cmdif_create(name, vdev, rte_socket_id());

	return ret;
}

static int
dpaa2_cmdif_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -1;

	DPAA2_CMDIF_INFO("Closing %s on NUMA node %d", name, rte_socket_id());

	ret = dpaa2_cmdif_destroy(name);

	return ret;
}

static struct rte_vdev_driver dpaa2_cmdif_drv = {
	.probe = dpaa2_cmdif_probe,
	.remove = dpaa2_cmdif_remove
};

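/*
 * The device is instantiated through the vdev bus, e.g. by passing
 * --vdev=dpaa2_dpci on the EAL command line, or programmatically (a sketch,
 * assuming no device arguments are needed):
 *
 *	rte_vdev_init("dpaa2_dpci", NULL);
 */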
RTE_PMD_REGISTER_VDEV(DPAA2_CMDIF_PMD_NAME, dpaa2_cmdif_drv);
RTE_LOG_REGISTER(dpaa2_cmdif_logtype, pmd.raw.dpaa2.cmdif, INFO);