/*	$OpenBSD: cpufunc.h,v 1.13 2016/09/04 09:22:28 mpi Exp $	*/
/*	$NetBSD: cpufunc.h,v 1.3 2003/05/08 10:27:43 fvdl Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

/*
 * Functions to provide access to amd64-specific instructions.
 */

#include <sys/types.h>

#include <machine/specialreg.h>

static __inline void
x86_pause(void)
{
	/* nothing */
}

#ifdef _KERNEL

extern int cpu_feature;

static __inline void
invlpg(u_int64_t addr)
{
	__asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

static __inline void
lidt(void *p)
{
	__asm volatile("lidt (%0)" : : "r" (p) : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * Upper 32 bits are reserved anyway, so just keep this at 32 bits.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}

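/*
 * Illustrative sketch (not part of the original header): toggling a bit in
 * %cr0 with the helpers above.  CR0_WP is assumed to be provided by
 * <machine/specialreg.h>.
 */
#if 0	/* example only */
static __inline void
cr0_clear_wp_example(void)
{
	u_int cr0;

	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);	/* allow writes to read-only pages */
	/* ... modify otherwise write-protected memory here ... */
	lcr0(cr0);		/* restore the previous value */
}
#endif
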
static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * Same as for cr0. Don't touch upper 32 bits.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	u_int64_t val64;
	__asm volatile("movq %%cr4,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm volatile("movq %%cr3,%0" : "=r" (val));
	__asm volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 *	System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
		tlbflush();
}

#ifdef notyet
void	setidt(int idx, /*XXX*/caddr_t func, int typ, int dpl);
#endif


/* XXXX ought to be in psl.h with spl() functions */

static __inline void
disable_intr(void)
{
	__asm volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm volatile("sti");
}

static __inline u_long
read_rflags(void)
{
	u_long	ef;

	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

static __inline u_long
intr_disable(void)
{
	u_long ef;

	ef = read_rflags();
	disable_intr();
	return (ef);
}

static __inline void
intr_restore(u_long ef)
{
	write_rflags(ef);
}

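/*
 * Illustrative sketch (not part of the original header): a short critical
 * section built from the rflags helpers above.  intr_disable() saves the
 * current rflags (including IF) and clears IF; intr_restore() puts the
 * saved state back, so nested use is safe.
 */
#if 0	/* example only */
static __inline void
intr_critical_example(void)
{
	u_long s;

	s = intr_disable();
	/* ... touch state that must not be interrupted ... */
	intr_restore(s);
}
#endif
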
static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}

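/*
 * Illustrative sketch (not part of the original header): a read-modify-write
 * of an MSR with the helpers above.  MSR_EFER and EFER_NXE are assumed to be
 * provided by <machine/specialreg.h>.
 */
#if 0	/* example only */
static __inline void
msr_rmw_example(void)
{
	u_int64_t efer;

	efer = rdmsr(MSR_EFER);
	wrmsr(MSR_EFER, efer | EFER_NXE);	/* enable no-execute support */
}
#endif
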
/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 *
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */

#define	OPTERON_MSR_PASSCODE	0x9c5a203a

static __inline u_int64_t
rdmsr_locked(u_int msr, u_int code)
{
	uint32_t hi, lo;
	__asm volatile("rdmsr"
	    : "=d" (hi), "=a" (lo)
	    : "c" (msr), "D" (code));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr_locked(u_int msr, u_int code, u_int64_t newval)
{
	__asm volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr), "D" (code));
}

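/*
 * Illustrative sketch (not part of the original header): reading one of the
 * passcode-protected MSRs described above.  The passcode travels in %edi via
 * the "D" constraint; the MSR number itself is left to the caller.
 */
#if 0	/* example only */
static __inline u_int64_t
locked_msr_read_example(u_int msr)
{
	return (rdmsr_locked(msr, OPTERON_MSR_PASSCODE));
}
#endif
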
static __inline void
wbinvd(void)
{
	__asm volatile("wbinvd");
}

static __inline void
clflush(u_int64_t addr)
{
	__asm volatile("clflush %0" : "+m" (*(volatile char *)addr));
}

static __inline void
mfence(void)
{
	__asm volatile("mfence" : : : "memory");
}

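/*
 * Illustrative sketch (not part of the original header): flushing a buffer
 * from the cache line by line and then fencing so the flushes are globally
 * visible.  The 64-byte line size is an assumption; the real size comes
 * from CPUID.
 */
#if 0	/* example only */
static __inline void
clflush_range_example(void *buf, size_t len)
{
	u_int64_t p = (u_int64_t)buf & ~(u_int64_t)63;
	u_int64_t end = (u_int64_t)buf + len;

	for (; p < end; p += 64)
		clflush(p);
	mfence();
}
#endif
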
static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

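/*
 * Illustrative sketch (not part of the original header): a rough cycle count
 * around a piece of code.  rdtsc is not serializing, so out-of-order
 * execution can skew very short measurements.
 */
#if 0	/* example only */
static __inline u_int64_t
rdtsc_delta_example(void)
{
	u_int64_t t0, t1;

	t0 = rdtsc();
	/* ... code being measured ... */
	t1 = rdtsc();
	return (t1 - t0);
}
#endif
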
static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
monitor(const volatile void *addr, u_long extensions, u_int hints)
{

	__asm volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
mwait(u_long extensions, u_int hints)
{

	__asm volatile("mwait" : : "a" (hints), "c" (extensions));
}

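/*
 * Illustrative sketch (not part of the original header): waiting for a flag
 * to change with monitor/mwait instead of spinning.  Extensions and hints of
 * 0 request the default behaviour; the monitor must be re-armed and the flag
 * re-checked after every wakeup because mwait can return for other reasons
 * (e.g. interrupts).
 */
#if 0	/* example only */
static __inline void
mwait_wait_example(volatile int *flag)
{
	while (*flag == 0) {
		monitor(flag, 0, 0);
		if (*flag == 0)
			mwait(0, 0);
	}
}
#endif
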
static __inline void
xsetbv(uint32_t reg, uint64_t mask)
{
	uint32_t lo, hi;

	lo = mask;
	hi = mask >> 32;
	__asm volatile("xsetbv" :: "c" (reg), "a" (lo), "d" (hi) : "memory");
}

/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm volatile("int $3");
}

#define read_psl()	read_rflags()
#define write_psl(x)	write_rflags(x)

void amd64_errata(struct cpu_info *);

#endif /* _KERNEL */

#endif /* !_MACHINE_CPUFUNC_H_ */