/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP
 *
 */

#include <fsl_usd.h>
#include <process.h>
#include "qman_priv.h"
#include <sys/ioctl.h>
#include <rte_branch_prediction.h>

/* Global variable containing revision id (even on non-control plane systems
 * where CCSR isn't available).
 */
u16 qman_ip_rev;
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
u16 qm_channel_pme = QMAN_CHANNEL_PME;

/* CCSR map address used to access CCSR-based registers */
static void *qman_ccsr_map;
/* The qman clock frequency */
static u32 qman_clk;

static __thread int qmfd = -1;
static __thread struct qm_portal_config qpcfg;
static __thread struct dpaa_ioctl_portal_map map = {
	.type = dpaa_portal_qman
};

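/* Map a QMan software portal for the calling thread, which must be affine to
 * exactly one CPU: map the portal, open its IRQ device, record the result in
 * the per-thread qpcfg/map state and create the affine portal.
 */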
static int fsl_qman_portal_init(uint32_t index, int is_shared)
{
	cpu_set_t cpuset;
	struct qman_portal *portal;
	int loop, ret;
	struct dpaa_ioctl_irq_map irq_map;

	/* Verify the thread's cpu-affinity */
	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
				     &cpuset);
	if (ret) {
		error(0, ret, "pthread_getaffinity_np()");
		return ret;
	}
	qpcfg.cpu = -1;
	for (loop = 0; loop < CPU_SETSIZE; loop++)
		if (CPU_ISSET(loop, &cpuset)) {
			if (qpcfg.cpu != -1) {
				pr_err("Thread is not affine to 1 cpu\n");
				return -EINVAL;
			}
			qpcfg.cpu = loop;
		}
	if (qpcfg.cpu == -1) {
		pr_err("Bug in getaffinity handling!\n");
		return -EINVAL;
	}

	/* Allocate and map a qman portal */
	map.index = index;
	ret = process_portal_map(&map);
	if (ret) {
		error(0, ret, "process_portal_map()");
		return ret;
	}
	qpcfg.channel = map.channel;
	qpcfg.pools = map.pools;
	qpcfg.index = map.index;

	/* Make the portal's cache-[enabled|inhibited] regions */
	qpcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
	qpcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;

	qmfd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
	if (qmfd == -1) {
		pr_err("QMan irq init failed\n");
		process_portal_unmap(&map.addr);
		return -EBUSY;
	}

	qpcfg.is_shared = is_shared;
	qpcfg.node = NULL;
	qpcfg.irq = qmfd;

	portal = qman_create_affine_portal(&qpcfg, NULL, 0);
	if (!portal) {
		pr_err("Qman portal initialisation failed (%d)\n",
		       qpcfg.cpu);
		process_portal_unmap(&map.addr);
		return -EBUSY;
	}

	irq_map.type = dpaa_portal_qman;
	irq_map.portal_cinh = map.addr.cinh;
	process_portal_irq_map(qmfd, &irq_map);
	return 0;
}

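/* Tear down the calling thread's portal: unmap the portal IRQ, destroy the
 * affine portal and release the portal memory mapping.
 */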
static int fsl_qman_portal_finish(void)
{
	__maybe_unused const struct qm_portal_config *cfg;
	int ret;

	process_portal_irq_unmap(qmfd);

	cfg = qman_destroy_affine_portal(NULL);
	DPAA_BUG_ON(cfg != &qpcfg);
	ret = process_portal_unmap(&map.addr);
	if (ret)
		error(0, ret, "process_portal_unmap()");
	return ret;
}

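/* Expose the per-thread portal IRQ file descriptor to callers. */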
int qman_thread_fd(void)
{
	return qmfd;
}

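/* Initialise a QMan portal for the calling thread on any available portal
 * index, in non-shared mode.
 */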
int qman_thread_init(void)
{
	/* Convert from contiguous/virtual cpu numbering to the real cpu when
	 * calling into code that depends on the device naming.
	 */
	return fsl_qman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
}

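/* Release the calling thread's QMan portal. */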
int qman_thread_finish(void)
{
	return fsl_qman_portal_finish();
}

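/* Re-arm the calling thread's portal interrupt after it has been serviced. */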
void qman_thread_irq(void)
{
	qbman_invoke_irq(qpcfg.irq);

	/* Now we need to uninhibit interrupts. This is the only code outside
	 * the regular portal driver that manipulates any portal register, so
	 * rather than breaking that encapsulation I am simply hard-coding the
	 * offset to the inhibit register here.
	 */
	out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0x36C0, 0);
}

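/* Create a dynamically allocated QMan portal for the calling thread. Unlike
 * fsl_qman_portal_init(), the configuration is heap-allocated rather than
 * kept in per-thread static state, so the portal must be released with
 * fsl_qman_portal_destroy().
 */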
struct qman_portal *fsl_qman_portal_create(void)
{
	cpu_set_t cpuset;
	struct qman_portal *res;

	struct qm_portal_config *q_pcfg;
	int loop, ret;
	struct dpaa_ioctl_irq_map irq_map;
	struct dpaa_ioctl_portal_map q_map = {0};
	int q_fd;

	q_pcfg = kzalloc((sizeof(struct qm_portal_config)), 0);
	if (!q_pcfg) {
		error(0, -1, "q_pcfg kzalloc failed");
		return NULL;
	}

	/* Verify the thread's cpu-affinity */
	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
				     &cpuset);
	if (ret) {
		error(0, ret, "pthread_getaffinity_np()");
		kfree(q_pcfg);
		return NULL;
	}

	q_pcfg->cpu = -1;
	for (loop = 0; loop < CPU_SETSIZE; loop++)
		if (CPU_ISSET(loop, &cpuset)) {
			if (q_pcfg->cpu != -1) {
				pr_err("Thread is not affine to 1 cpu\n");
				kfree(q_pcfg);
				return NULL;
			}
			q_pcfg->cpu = loop;
		}
	if (q_pcfg->cpu == -1) {
		pr_err("Bug in getaffinity handling!\n");
		kfree(q_pcfg);
		return NULL;
	}

	/* Allocate and map a qman portal */
	q_map.type = dpaa_portal_qman;
	q_map.index = QBMAN_ANY_PORTAL_IDX;
	ret = process_portal_map(&q_map);
	if (ret) {
		error(0, ret, "process_portal_map()");
		kfree(q_pcfg);
		return NULL;
	}
	q_pcfg->channel = q_map.channel;
	q_pcfg->pools = q_map.pools;
	q_pcfg->index = q_map.index;

	/* Make the portal's cache-[enabled|inhibited] regions */
	q_pcfg->addr_virt[DPAA_PORTAL_CE] = q_map.addr.cena;
	q_pcfg->addr_virt[DPAA_PORTAL_CI] = q_map.addr.cinh;

	q_fd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
	if (q_fd == -1) {
		pr_err("QMan irq init failed\n");
		goto err1;
	}

	q_pcfg->irq = q_fd;

	res = qman_create_affine_portal(q_pcfg, NULL, true);
	if (!res) {
		pr_err("Qman portal initialisation failed (%d)\n",
		       q_pcfg->cpu);
		goto err2;
	}

	irq_map.type = dpaa_portal_qman;
	irq_map.portal_cinh = q_map.addr.cinh;
	process_portal_irq_map(q_fd, &irq_map);

	return res;
err2:
	close(q_fd);
err1:
	process_portal_unmap(&q_map.addr);
	kfree(q_pcfg);
	return NULL;
}

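/* Destroy a portal created by fsl_qman_portal_create(): unmap its IRQ and
 * portal memory, then free the portal and its configuration.
 */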
int fsl_qman_portal_destroy(struct qman_portal *qp)
{
	const struct qm_portal_config *cfg;
	struct dpaa_portal_map addr;
	int ret;

	cfg = qman_destroy_affine_portal(qp);
	kfree(qp);

	process_portal_irq_unmap(cfg->irq);

	addr.cena = cfg->addr_virt[DPAA_PORTAL_CE];
	addr.cinh = cfg->addr_virt[DPAA_PORTAL_CI];

	ret = process_portal_unmap(&addr);
	if (ret)
		pr_err("process_portal_unmap() (%d)\n", ret);

	kfree((void *)cfg);

	return ret;
}

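/* One-time process-wide QMan initialisation: determine the QMan IP revision
 * and pool-channel range from the device tree, map the QMan CCSR register
 * region via /dev/mem and read the QMan clock frequency.
 */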
int qman_global_init(void)
{
	const struct device_node *dt_node;
	size_t lenp;
	const u32 *chanid;
	static int ccsr_map_fd;
	const uint32_t *qman_addr;
	uint64_t phys_addr;
	uint64_t regs_size;
	const u32 *clk;

	static int done;

	if (done)
		return -EBUSY;

	/* Use the device-tree to determine IP revision until something better
	 * is devised.
	 */
	dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman-portal");
	if (!dt_node) {
		pr_err("No qman portals available for any CPU\n");
		return -ENODEV;
	}
	if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.0") ||
	    of_device_is_compatible(dt_node, "fsl,qman-portal-1.0.0"))
		pr_err("QMan rev1.0 on P4080 rev1 is not supported!\n");
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.1") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-1.1.0"))
		qman_ip_rev = QMAN_REV11;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.2") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-1.2.0"))
		qman_ip_rev = QMAN_REV12;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-2.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-2.0.0"))
		qman_ip_rev = QMAN_REV20;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.1"))
		qman_ip_rev = QMAN_REV30;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.1") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.2") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.3"))
		qman_ip_rev = QMAN_REV31;
	else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.0") ||
		 of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.1"))
		qman_ip_rev = QMAN_REV32;
	else
		qman_ip_rev = QMAN_REV11;

	if (!qman_ip_rev) {
		pr_err("Unknown qman portal version\n");
		return -ENODEV;
	}
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
		qm_channel_pme = QMAN_CHANNEL_PME_REV3;
	}

	dt_node = of_find_compatible_node(NULL, NULL, "fsl,pool-channel-range");
	if (!dt_node) {
		pr_err("No qman pool channel range available\n");
		return -ENODEV;
	}
	chanid = of_get_property(dt_node, "fsl,pool-channel-range", &lenp);
	if (!chanid) {
		pr_err("Can not get pool-channel-range property\n");
		return -EINVAL;
	}

	/* Get the CCSR base address from the device tree */
	dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman");
	if (!dt_node) {
		pr_err("No qman device node available\n");
		return -ENODEV;
	}
	qman_addr = of_get_address(dt_node, 0, &regs_size, NULL);
	if (!qman_addr) {
		pr_err("of_get_address cannot return qman address\n");
		return -EINVAL;
	}
	phys_addr = of_translate_address(dt_node, qman_addr);
	if (!phys_addr) {
		pr_err("of_translate_address failed\n");
		return -EINVAL;
	}

	ccsr_map_fd = open("/dev/mem", O_RDWR);
	if (unlikely(ccsr_map_fd < 0)) {
		pr_err("Can not open /dev/mem for qman ccsr map\n");
		return ccsr_map_fd;
	}

	qman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
			     MAP_SHARED, ccsr_map_fd, phys_addr);
	if (qman_ccsr_map == MAP_FAILED) {
		pr_err("Can not map qman ccsr base\n");
		return -EINVAL;
	}

	clk = of_get_property(dt_node, "clock-frequency", NULL);
	if (!clk)
		pr_warn("Can't find Qman clock frequency\n");
	else
		qman_clk = be32_to_cpu(*clk);

#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	return qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
#endif
	return 0;
}
368