xref: /dpdk/drivers/bus/dpaa/base/qbman/bman_driver.c (revision e9fd4b87f08d4da01ea9bde075f02e702b65a784)
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  *
3  * Copyright 2008-2016 Freescale Semiconductor Inc.
4  * Copyright 2017 NXP
5  *
6  */
7 
8 #include <rte_branch_prediction.h>
9 
10 #include <fsl_usd.h>
11 #include <process.h>
12 #include "bman_priv.h"
13 #include <sys/ioctl.h>
14 #include <err.h>
15 
16 /*
17  * Global variables of the max portal/pool number this bman version supported
18  */
19 static u16 bman_ip_rev;
20 u16 bman_pool_max;
21 static void *bman_ccsr_map;
22 
23 /*****************/
24 /* Portal driver */
25 /*****************/
26 
27 static __thread int bmfd = -1;
28 static __thread struct bm_portal_config pcfg;
29 static __thread struct dpaa_ioctl_portal_map map = {
30 	.type = dpaa_portal_bman
31 };
32 
fsl_bman_portal_init(uint32_t idx,int is_shared)33 static int fsl_bman_portal_init(uint32_t idx, int is_shared)
34 {
35 	cpu_set_t cpuset;
36 	struct bman_portal *portal;
37 	int loop, ret;
38 	struct dpaa_ioctl_irq_map irq_map;
39 
40 	/* Verify the thread's cpu-affinity */
41 	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
42 				     &cpuset);
43 	if (ret) {
44 		errno = ret;
45 		err(0, "pthread_getaffinity_np()");
46 		return ret;
47 	}
48 	pcfg.cpu = -1;
49 	for (loop = 0; loop < CPU_SETSIZE; loop++)
50 		if (CPU_ISSET(loop, &cpuset)) {
51 			if (pcfg.cpu != -1) {
52 				pr_err("Thread is not affine to 1 cpu");
53 				return -EINVAL;
54 			}
55 			pcfg.cpu = loop;
56 		}
57 	if (pcfg.cpu == -1) {
58 		pr_err("Bug in getaffinity handling!");
59 		return -EINVAL;
60 	}
61 	/* Allocate and map a bman portal */
62 	map.index = idx;
63 	ret = process_portal_map(&map);
64 	if (ret) {
65 		errno = ret;
66 		err(0, "process_portal_map()");
67 		return ret;
68 	}
69 	/* Make the portal's cache-[enabled|inhibited] regions */
70 	pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
71 	pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
72 	pcfg.is_shared = is_shared;
73 	pcfg.index = map.index;
74 	bman_depletion_fill(&pcfg.mask);
75 
76 	bmfd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
77 	if (bmfd == -1) {
78 		pr_err("BMan irq init failed");
79 		process_portal_unmap(&map.addr);
80 		return -EBUSY;
81 	}
82 	/* Use the IRQ FD as a unique IRQ number */
83 	pcfg.irq = bmfd;
84 
85 	portal = bman_create_affine_portal(&pcfg);
86 	if (!portal) {
87 		pr_err("Bman portal initialisation failed (%d)",
88 		       pcfg.cpu);
89 		process_portal_unmap(&map.addr);
90 		return -EBUSY;
91 	}
92 
93 	/* Set the IRQ number */
94 	irq_map.type = dpaa_portal_bman;
95 	irq_map.portal_cinh = map.addr.cinh;
96 	process_portal_irq_map(bmfd, &irq_map);
97 	return 0;
98 }
99 
fsl_bman_portal_finish(void)100 static int fsl_bman_portal_finish(void)
101 {
102 	__maybe_unused const struct bm_portal_config *cfg;
103 	int ret;
104 
105 	process_portal_irq_unmap(bmfd);
106 
107 	cfg = bman_destroy_affine_portal();
108 	DPAA_BUG_ON(cfg != &pcfg);
109 	ret = process_portal_unmap(&map.addr);
110 	if (ret) {
111 		errno = ret;
112 		err(0, "process_portal_unmap()");
113 	}
114 	return ret;
115 }
116 
/*
 * Return the calling thread's BMan portal IRQ file descriptor, as opened
 * by fsl_bman_portal_init() (-1 if no portal is mapped for this thread).
 */
int bman_thread_fd(void)
{
	return bmfd;
}
121 
/*
 * Set up a private (non-shared) BMan portal for the calling thread,
 * letting the driver pick any free portal index.
 * Returns 0 on success, negative errno on failure.
 */
int bman_thread_init(void)
{
	/* Convert from contiguous/virtual cpu numbering to real cpu when
	 * calling into the code that is dependent on the device naming.
	 */
	return fsl_bman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
}
129 
/*
 * Release the calling thread's BMan portal (counterpart to
 * bman_thread_init()). Returns 0 on success.
 */
int bman_thread_finish(void)
{
	return fsl_bman_portal_finish();
}
134 
/*
 * Service a BMan portal interrupt for the calling thread: invoke the
 * registered IRQ handling for pcfg.irq, then re-enable (uninhibit)
 * interrupts on the portal.
 */
void bman_thread_irq(void)
{
	qbman_invoke_irq(pcfg.irq);
	/* Now we need to uninhibit interrupts. This is the only code outside
	 * the regular portal driver that manipulates any portal register, so
	 * rather than breaking that encapsulation I am simply hard-coding the
	 * offset to the inhibit register here.
	 */
	out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
}
145 
bman_init_ccsr(const struct device_node * node)146 int bman_init_ccsr(const struct device_node *node)
147 {
148 	static int ccsr_map_fd;
149 	uint64_t phys_addr;
150 	const uint32_t *bman_addr;
151 	uint64_t regs_size;
152 
153 	bman_addr = of_get_address(node, 0, &regs_size, NULL);
154 	if (!bman_addr) {
155 		pr_err("of_get_address cannot return BMan address");
156 		return -EINVAL;
157 	}
158 	phys_addr = of_translate_address(node, bman_addr);
159 	if (!phys_addr) {
160 		pr_err("of_translate_address failed");
161 		return -EINVAL;
162 	}
163 
164 	ccsr_map_fd = open(BMAN_CCSR_MAP, O_RDWR);
165 	if (unlikely(ccsr_map_fd < 0)) {
166 		pr_err("Can not open /dev/mem for BMan CCSR map");
167 		return ccsr_map_fd;
168 	}
169 
170 	bman_ccsr_map = mmap(NULL, regs_size, PROT_READ |
171 			     PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
172 	if (bman_ccsr_map == MAP_FAILED) {
173 		pr_err("Can not map BMan CCSR base Bman: "
174 		       "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIu64,
175 		       *bman_addr, phys_addr, regs_size);
176 		return -EINVAL;
177 	}
178 
179 	return 0;
180 }
181 
bman_global_init(void)182 int bman_global_init(void)
183 {
184 	const struct device_node *dt_node;
185 	static int done;
186 
187 	if (done)
188 		return -EBUSY;
189 	/* Use the device-tree to determine IP revision until something better
190 	 * is devised.
191 	 */
192 	dt_node = of_find_compatible_node(NULL, NULL, "fsl,bman-portal");
193 	if (!dt_node) {
194 		pr_err("No bman portals available for any CPU\n");
195 		return -ENODEV;
196 	}
197 	if (of_device_is_compatible(dt_node, "fsl,bman-portal-1.0") ||
198 	    of_device_is_compatible(dt_node, "fsl,bman-portal-1.0.0")) {
199 		bman_ip_rev = BMAN_REV10;
200 		bman_pool_max = 64;
201 	} else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.0") ||
202 		of_device_is_compatible(dt_node, "fsl,bman-portal-2.0.8")) {
203 		bman_ip_rev = BMAN_REV20;
204 		bman_pool_max = 8;
205 	} else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.0") ||
206 		of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.1") ||
207 		of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.2") ||
208 		of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.3")) {
209 		bman_ip_rev = BMAN_REV21;
210 		bman_pool_max = 64;
211 	} else {
212 		pr_warn("unknown BMan version in portal node,default "
213 			"to rev1.0");
214 		bman_ip_rev = BMAN_REV10;
215 		bman_pool_max = 64;
216 	}
217 
218 	if (!bman_ip_rev) {
219 		pr_err("Unknown bman portal version\n");
220 		return -ENODEV;
221 	}
222 	{
223 		const struct device_node *dn = of_find_compatible_node(NULL,
224 							NULL, "fsl,bman");
225 		if (!dn)
226 			pr_err("No bman device node available");
227 
228 		if (bman_init_ccsr(dn))
229 			pr_err("BMan CCSR map failed.");
230 	}
231 
232 	done = 1;
233 	return 0;
234 }
235 
/* Per-pool "content" register: free-buffer count for pool n */
#define BMAN_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
/*
 * Read the current free-buffer count of buffer pool @bpid from the CCSR
 * register map.
 * NOTE(review): unlike bm_pool_set(), this performs no validation —
 * callers must ensure bman_init_ccsr() succeeded (bman_ccsr_map is
 * non-NULL) and that bpid < bman_pool_max.
 */
u32 bm_pool_free_buffers(u32 bpid)
{
	return in_be32(bman_ccsr_map + BMAN_POOL_CONTENT(bpid));
}
241 
/*
 * Encode @val into the register's mantissa/exponent threshold format:
 * bits 0-7 carry the mantissa, bits 8+ the exponent, i.e. the encoded
 * threshold represents (mantissa << exponent). If @roundup is set, any
 * bits dropped while shifting round the mantissa up; otherwise they are
 * truncated (round down).
 */
static u32 __generate_thresh(u32 val, int roundup)
{
	u32 exp = 0;

	/* Halve until the mantissa fits in 8 bits, tracking the exponent */
	while (val > 0xff) {
		u32 dropped = val & 1;

		val >>= 1;
		exp++;
		/* Rounding may push val back above 0xff; the loop condition
		 * re-normalises it on the next pass.
		 */
		if (roundup && dropped)
			val++;
	}
	/* The exponent field is 4 bits wide */
	DPAA_ASSERT(exp < 0x10);
	return val | (exp << 8);
}
257 
258 #define POOL_SWDET(n)       (0x0000 + ((n) * 0x04))
259 #define POOL_HWDET(n)       (0x0100 + ((n) * 0x04))
260 #define POOL_SWDXT(n)       (0x0200 + ((n) * 0x04))
261 #define POOL_HWDXT(n)       (0x0300 + ((n) * 0x04))
bm_pool_set(u32 bpid,const u32 * thresholds)262 int bm_pool_set(u32 bpid, const u32 *thresholds)
263 {
264 	if (!bman_ccsr_map)
265 		return -ENODEV;
266 	if (bpid >= bman_pool_max)
267 		return -EINVAL;
268 	out_be32(bman_ccsr_map + POOL_SWDET(bpid),
269 		 __generate_thresh(thresholds[0], 0));
270 	out_be32(bman_ccsr_map + POOL_SWDXT(bpid),
271 		 __generate_thresh(thresholds[1], 1));
272 	out_be32(bman_ccsr_map + POOL_HWDET(bpid),
273 		 __generate_thresh(thresholds[2], 0));
274 	out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
275 		 __generate_thresh(thresholds[3], 1));
276 	return 0;
277 }
278 
279 #define BMAN_LOW_DEFAULT_THRESH		0x40
280 #define BMAN_HIGH_DEFAULT_THRESH		0x80
bm_pool_set_hw_threshold(u32 bpid,const u32 low_thresh,const u32 high_thresh)281 int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
282 			     const u32 high_thresh)
283 {
284 	if (!bman_ccsr_map)
285 		return -ENODEV;
286 	if (bpid >= bman_pool_max)
287 		return -EINVAL;
288 	if (low_thresh && high_thresh) {
289 		out_be32(bman_ccsr_map + POOL_HWDET(bpid),
290 			 __generate_thresh(low_thresh, 0));
291 		out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
292 			 __generate_thresh(high_thresh, 1));
293 	} else {
294 		out_be32(bman_ccsr_map + POOL_HWDET(bpid),
295 			 __generate_thresh(BMAN_LOW_DEFAULT_THRESH, 0));
296 		out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
297 			 __generate_thresh(BMAN_HIGH_DEFAULT_THRESH, 1));
298 	}
299 	return 0;
300 }
301