xref: /netbsd-src/sys/arch/arm/vfp/vfp_init.c (revision 2b3d1ee8a773e028429b331332895d44f445d720)
/*      $NetBSD: vfp_init.c,v 1.7 2012/09/22 19:45:54 matt Exp $ */

/*
 * Copyright (c) 2008 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/cpu.h>

#include <arm/pcb.h>
#include <arm/undefined.h>
#include <arm/vfpreg.h>

/*
 * Use generic co-processor instructions to avoid assembly problems.
 */
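/*
 * The VFP system registers live on coprocessor 10 with opc1 == 7;
 * CRn selects the register (c0 = FPSID, c1 = FPSCR, c8 = FPEXC,
 * c9 = FPINST, c10 = FPINST2).  The raw MRC/MCR encodings below are
 * exactly what the FMRX/FMXR mnemonics would assemble to.
 */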

/* FMRX <X>, fpsid */
static inline uint32_t
read_fpsid(void)
{
	uint32_t rv;
	__asm __volatile("mrc p10, 7, %0, c0, c0, 0" : "=r" (rv));
	return rv;
}

/* FMRX <X>, fpscr */
static inline uint32_t
read_fpscr(void)
{
	uint32_t rv;
	__asm __volatile("mrc p10, 7, %0, c1, c0, 0" : "=r" (rv));
	return rv;
}

/* FMRX <X>, fpexc */
static inline uint32_t
read_fpexc(void)
{
	uint32_t rv;
	__asm __volatile("mrc p10, 7, %0, c8, c0, 0" : "=r" (rv));
	return rv;
}

/* FMRX <X>, fpinst */
static inline uint32_t
read_fpinst(void)
{
	uint32_t rv;
	__asm __volatile("mrc p10, 7, %0, c9, c0, 0" : "=r" (rv));
	return rv;
}

/* FMRX <X>, fpinst2 */
static inline uint32_t
read_fpinst2(void)
{
	uint32_t rv;
	__asm __volatile("mrc p10, 7, %0, c10, c0, 0" : "=r" (rv));
	return rv;
}

/* FSTMD <X>, {d0-d15} */
#define save_vfpregs(X)	__asm __volatile("stc p11, c0, [%0], {32}" : \
			    : "r" (X) : "memory")

/* FMXR <X>, fpscr */
#define write_fpscr(X)	__asm __volatile("mcr p10, 7, %0, c1, c0, 0" : \
			    : "r" (X))
/* FMXR <X>, fpexc */
#define write_fpexc(X)	__asm __volatile("mcr p10, 7, %0, c8, c0, 0" : \
			    : "r" (X))
/* FMXR <X>, fpinst */
#define write_fpinst(X)	__asm __volatile("mcr p10, 7, %0, c9, c0, 0" : \
			    : "r" (X))
/* FMXR <X>, fpinst2 */
#define write_fpinst2(X) __asm __volatile("mcr p10, 7, %0, c10, c0, 0" : \
			    : "r" (X))
/* FLDMD <X>, {d0-d15} */
#define load_vfpregs(X)	__asm __volatile("ldc p11, c0, [%0], {32}" : \
			    : "r" (X) : "memory")
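/*
 * Both block-transfer macros move 32 words, i.e. the sixteen doubleword
 * registers d0-d15; the "memory" clobber tells the compiler that the
 * register dump addressed by X is read or written by the instruction.
 */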

#ifdef FPU_VFP

/* The real handler for VFP bounces.  */
static int vfp_handler(u_int, u_int, trapframe_t *, int);

static void vfp_state_load(lwp_t *, bool);
static void vfp_state_save(lwp_t *);
static void vfp_state_release(lwp_t *);

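/*
 * Glue for the machine-independent pcu(9) framework: pcu_state_load
 * puts this lwp's VFP state into the hardware, pcu_state_save writes
 * the hardware state back into the PCB, and pcu_state_release marks
 * the unit as no longer owned by the lwp.
 */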
const pcu_ops_t arm_vfp_ops = {
	.pcu_id = PCU_FPU,
	.pcu_state_load = vfp_state_load,
	.pcu_state_save = vfp_state_save,
	.pcu_state_release = vfp_state_release,
};

struct evcnt vfpevent_use;
struct evcnt vfpevent_reuse;

/*
 * Used to test for a VFP.  The following function is installed as a
 * coproc10 handler on the undefined instruction vector and then we issue
 * a VFP instruction.  If undefined_test is non-zero afterwards, the VFP
 * did not handle the instruction, so it must be absent or disabled.
 */

static int undefined_test;

static int
vfp_test(u_int address, u_int insn, trapframe_t *frame, int fault_code)
{

	frame->tf_pc += INSN_SIZE;
	++undefined_test;
	return 0;
}
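/*
 * The handler above simply steps the PC past the trapped instruction
 * and counts the bounce, so the FPSID read done by vfp_attach()
 * completes harmlessly even when no VFP is present.
 */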

#endif /* FPU_VFP */

struct evcnt vfp_fpscr_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, NULL, "VFP", "FPSCR traps");
EVCNT_ATTACH_STATIC(vfp_fpscr_ev);

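/*
 * vfp_fpscr_handler() plays two roles: it is the only coprocessor
 * handler installed when no VFP is configured or detected, and with a
 * real VFP it is also called first from vfp_handler() as a fast path
 * for FPSCR-only accesses.
 */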
static int
vfp_fpscr_handler(u_int address, u_int insn, trapframe_t *frame, int fault_code)
{
	struct lwp * const l = curlwp;
	const u_int regno = (insn >> 12) & 0xf;
	/*
	 * Only match moves to/from the FPSCR register, and only when
	 * the ARM register involved is not SP, LR, or PC.
	 */
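	/*
	 * After masking off the Rt field and the L bit (0x00100000), the
	 * encoding matches both FMXR fpscr, <reg> and FMRX <reg>, fpscr;
	 * the L bit is tested below to pick the transfer direction.
	 */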
	if ((insn & 0xffef0fff) != 0xeee10a10 || regno > 12)
		return 1;

	struct pcb * const pcb = lwp_getpcb(l);

#ifdef FPU_VFP
	/*
	 * If FPU is valid somewhere, let's just reenable VFP and
	 * retry the instruction (only safe thing to do since the
	 * pcb has a stale copy).
	 */
	if (pcb->pcb_vfp.vfp_fpexc & VFP_FPEXC_EN)
		return 1;
#endif

	if (__predict_false((l->l_md.md_flags & MDLWP_VFPUSED) == 0)) {
		l->l_md.md_flags |= MDLWP_VFPUSED;
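		/*
		 * DN|FZ gives default-NaN, flush-to-zero ("RunFast" style)
		 * arithmetic with all exception traps disabled, so no VFP
		 * support code is needed by default.
		 */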
		pcb->pcb_vfp.vfp_fpscr =
		    (VFP_FPSCR_DN | VFP_FPSCR_FZ);	/* Runfast */
	}

	/*
	 * We now know the pcb has the saved copy.
	 */
	register_t * const regp = &frame->tf_r0 + regno;
	if (insn & 0x00100000) {
		*regp = pcb->pcb_vfp.vfp_fpscr;
	} else {
		pcb->pcb_vfp.vfp_fpscr = *regp;
	}

	vfp_fpscr_ev.ev_count++;

	frame->tf_pc += INSN_SIZE;
	return 0;
}

#ifndef FPU_VFP
/*
 * If we don't want VFP support, we still need to handle emulating VFP FPSCR
 * instructions.
 */
void
vfp_attach(void)
{
	install_coproc_handler(VFP_COPROC, vfp_fpscr_handler);
}

#else
void
vfp_attach(void)
{
	struct cpu_info * const ci = curcpu();
	const char *model = NULL;
	bool vfp_p = false;

#ifdef FPU_VFP
	if (CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)
	    || CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)) {
		const uint32_t cpacr_vfp = CPACR_CPn(VFP_COPROC);
		const uint32_t cpacr_vfp2 = CPACR_CPn(VFP_COPROC2);

		/*
		 * We first need to enable access to the coprocessors.
		 */
		uint32_t cpacr = armreg_cpacr_read();
		cpacr |= __SHIFTIN(CPACR_ALL, cpacr_vfp);
		cpacr |= __SHIFTIN(CPACR_ALL, cpacr_vfp2);
		armreg_cpacr_write(cpacr);

		/*
		 * If we could enable them, then they exist: the CPACR
		 * fields of coprocessors that are not implemented read
		 * back as "no access".
		 */
		cpacr = armreg_cpacr_read();
		vfp_p = __SHIFTOUT(cpacr, cpacr_vfp2) != CPACR_NOACCESS
		    || __SHIFTOUT(cpacr, cpacr_vfp) != CPACR_NOACCESS;
	}
#endif

	void *uh = install_coproc_handler(VFP_COPROC, vfp_test);

	undefined_test = 0;

	const uint32_t fpsid = read_fpsid();

	remove_coproc_handler(uh);

	if (undefined_test != 0) {
		aprint_normal_dev(ci->ci_dev, "No VFP detected\n");
		install_coproc_handler(VFP_COPROC, vfp_fpscr_handler);
		ci->ci_vfp_id = 0;
		return;
	}

	ci->ci_vfp_id = fpsid;
	switch (fpsid & ~ VFP_FPSID_REV_MSK) {
	case FPU_VFP10_ARM10E:
		model = "VFP10 R1";
		break;
	case FPU_VFP11_ARM11:
		model = "VFP11";
		break;
	case FPU_VFP_CORTEXA5:
	case FPU_VFP_CORTEXA7:
	case FPU_VFP_CORTEXA8:
	case FPU_VFP_CORTEXA9:
		model = "NEON MPE (VFP 3.0+)";
		break;
	default:
		aprint_normal_dev(ci->ci_dev, "unrecognized VFP version %x\n",
		    fpsid);
		install_coproc_handler(VFP_COPROC, vfp_fpscr_handler);
		return;
	}

	if (fpsid != 0) {
		aprint_normal("vfp%d at %s: %s\n",
		    device_unit(curcpu()->ci_dev), device_xname(curcpu()->ci_dev),
		    model);
	}
	evcnt_attach_dynamic(&vfpevent_use, EVCNT_TYPE_MISC, NULL,
	    "VFP", "proc use");
	evcnt_attach_dynamic(&vfpevent_reuse, EVCNT_TYPE_MISC, NULL,
	    "VFP", "proc re-use");
	install_coproc_handler(VFP_COPROC, vfp_handler);
	install_coproc_handler(VFP_COPROC2, vfp_handler);
}

/* The real handler for VFP bounces.  */
static int
vfp_handler(u_int address, u_int insn, trapframe_t *frame,
    int fault_code)
{
	struct cpu_info * const ci = curcpu();

	/* This shouldn't ever happen.  */
	if (fault_code != FAULT_USER)
		panic("VFP fault in non-user mode");

	if (ci->ci_vfp_id == 0)
		/* No VFP detected, just fault.  */
		return 1;

	/*
	 * If we are just changing/fetching FPSCR, don't bother loading it.
	 */
	if (!vfp_fpscr_handler(address, insn, frame, fault_code))
		return 0;

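	/*
	 * pcu_load() hands the VFP to curlwp: it ends up calling
	 * vfp_state_load() below, which enables the unit and loads this
	 * lwp's registers, so returning 0 retries the trapped
	 * instruction with the VFP live.
	 */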
	pcu_load(&arm_vfp_ops);

	/* Need to restart the faulted instruction.  */
//	frame->tf_pc -= INSN_SIZE;
	return 0;
}

static void
vfp_state_load(lwp_t *l, bool used)
{
	struct pcb * const pcb = lwp_getpcb(l);
	struct vfpreg * const fregs = &pcb->pcb_vfp;

	/*
	 * Instrument VFP usage -- if a process has not previously
	 * used the VFP, mark it as having used VFP for the first time,
	 * and count this event.
	 *
	 * If a process has used the VFP, count a "used VFP, and took
	 * a trap to use it again" event.
	 */
	if (__predict_false((l->l_md.md_flags & MDLWP_VFPUSED) == 0)) {
		vfpevent_use.ev_count++;
		l->l_md.md_flags |= MDLWP_VFPUSED;
		pcb->pcb_vfp.vfp_fpscr =
		    (VFP_FPSCR_DN | VFP_FPSCR_FZ);	/* Runfast */
	} else {
		vfpevent_reuse.ev_count++;
	}

	if (fregs->vfp_fpexc & VFP_FPEXC_EN) {
		/*
		 * If we think the VFP is enabled, it must have been
		 * disabled by vfp_state_release for another LWP so we can
		 * just restore FPEXC and return since our VFP state is
		 * still loaded.
		 */
		write_fpexc(fregs->vfp_fpexc);
		return;
	}

	/* Enable the VFP (so that we can write the registers).  */
	uint32_t fpexc = read_fpexc();
	KDASSERT((fpexc & VFP_FPEXC_EX) == 0);
	write_fpexc(fpexc | VFP_FPEXC_EN);

	load_vfpregs(fregs->vfp_regs);
	write_fpscr(fregs->vfp_fpscr);

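	/*
	 * If the saved FPEXC has the EX bit set, an exceptional (bounced)
	 * operation was pending when the state was saved; on the
	 * VFP10/VFP11 subarchitectures its description is held in
	 * FPINST/FPINST2 and must be put back before FPEXC is restored.
	 */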
	if (fregs->vfp_fpexc & VFP_FPEXC_EX) {
		struct cpu_info * const ci = curcpu();
		/* Need to restore the exception handling state.  */
		switch (ci->ci_vfp_id) {
		case FPU_VFP10_ARM10E:
		case FPU_VFP11_ARM11:
			write_fpinst2(fregs->vfp_fpinst2);
			write_fpinst(fregs->vfp_fpinst);
			break;
		default:
			panic("%s: Unsupported VFP %#x",
			    __func__, ci->ci_vfp_id);
		}
	}

	/* Finally, set FPEXC_EN in the saved state and enable the VFP.  */
	fregs->vfp_fpexc |= VFP_FPEXC_EN;
	write_fpexc(fregs->vfp_fpexc);
}

static void
vfp_state_save(lwp_t *l)
{
	struct pcb * const pcb = lwp_getpcb(l);
	struct vfpreg * const fregs = &pcb->pcb_vfp;

	/*
	 * If it's already disabled, then the state has been saved
	 * (or discarded).
	 */
	if ((fregs->vfp_fpexc & VFP_FPEXC_EN) == 0)
		return;

	/*
	 * Enable the VFP (so we can read the registers).
	 * Make sure the exception bit is cleared so that we can
	 * safely dump the registers.
	 */
	uint32_t fpexc = read_fpexc();
	write_fpexc((fpexc | VFP_FPEXC_EN) & ~VFP_FPEXC_EX);

	fregs->vfp_fpexc = fpexc;
	if (fpexc & VFP_FPEXC_EX) {
		struct cpu_info * const ci = curcpu();
		/* Need to save the exception handling state */
		switch (ci->ci_vfp_id) {
		case FPU_VFP10_ARM10E:
		case FPU_VFP11_ARM11:
			fregs->vfp_fpinst = read_fpinst();
			fregs->vfp_fpinst2 = read_fpinst2();
			break;
		default:
			panic("%s: Unsupported VFP %#x",
			    __func__, ci->ci_vfp_id);
		}
	}
	fregs->vfp_fpscr = read_fpscr();
	save_vfpregs(fregs->vfp_regs);

	/* Disable the VFP.  */
	write_fpexc(fpexc);
}

static void
vfp_state_release(lwp_t *l)
{
	struct pcb * const pcb = lwp_getpcb(l);

	/*
	 * Now mark the VFP as disabled (our state has already been
	 * saved or is being discarded).
	 */
	pcb->pcb_vfp.vfp_fpexc &= ~VFP_FPEXC_EN;

	/*
	 * Turn off the FPU so the next time a VFP instruction is issued
	 * an exception happens.  We don't know if this LWP's state was
	 * loaded but if we turned off the FPU for some other LWP, when
	 * pcu_load invokes vfp_state_load it will see that VFP_FPEXC_EN
	 * is still set, so it will just restore FPEXC and return since
	 * its contents are still sitting in the VFP.
	 */
	write_fpexc(read_fpexc() & ~VFP_FPEXC_EN);
}

void
vfp_savecontext(void)
{
	pcu_save(&arm_vfp_ops);
}

void
vfp_discardcontext(void)
{
	pcu_discard(&arm_vfp_ops);
}

#endif /* FPU_VFP */