/*	$NetBSD: cpufunc.h,v 1.5 2005/12/11 12:16:25 christos Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AMD64_CPUFUNC_H_
#define	_AMD64_CPUFUNC_H_

/*
 * Functions to provide access to x86-64-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/specialreg.h>

static __inline void
x86_pause(void)
{
	/* nothing */
}

static __inline void
x86_lfence(void)
{

	/*
	 * XXX if lfence isn't available...
	 *
	 * memory clobber to avoid compiler reordering.
	 */
	__asm __volatile("lfence" : : : "memory");
}

#ifdef _KERNEL

extern int cpu_feature;

static __inline void
invlpg(u_int64_t addr)
{
	__asm __volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

static __inline void
lidt(void *p)
{
	__asm __volatile("lidt (%0)" : : "r" (p));
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	u_int64_t val64 = val;
	__asm __volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * Upper 32 bits are reserved anyway, so just keep this 32 bits.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm __volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm __volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}
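/*
 * Illustrative sketch (kept under "#if 0", so it is never compiled): the
 * control register accessors above are intended to be used as a
 * read-modify-write pair.  The helper name below is hypothetical, and
 * CR0_WP is assumed to be provided by <machine/specialreg.h>; the sketch
 * saves CR0, clears the write-protect bit, and restores the saved value.
 */
#if 0
static __inline void
example_toggle_wp(void)
{
	u_int ocr0 = rcr0();		/* save the current CR0 */

	lcr0(ocr0 & ~CR0_WP);		/* clear write-protect */
	/* ... touch otherwise read-only mappings here ... */
	lcr0(ocr0);			/* restore the saved value */
}
#endif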
static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * Same as for cr0.  Don't touch the upper 32 bits.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm __volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	u_int64_t val64;
	__asm __volatile("movq %%cr4,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr3,%0" : "=r" (val));
	__asm __volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 * System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries, irrespective
	 * of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
		tlbflush();
}

#ifdef notyet
void setidt __P((int idx, /*XXX*/caddr_t func, int typ, int dpl));
#endif

/* XXX these ought to be in psl.h with the spl() functions */

static __inline void
disable_intr(void)
{
	__asm __volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline u_long
read_rflags(void)
{
	u_long ef;

	__asm __volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm __volatile("pushq %0; popfq" : : "r" (ef));
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm __volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm __volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}
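/*
 * Illustrative sketch (kept under "#if 0", so it is never compiled):
 * rdmsr() and wrmsr() are typically used together as a read-modify-write
 * pair, since rdmsr() reassembles %edx:%eax into a single 64-bit value and
 * wrmsr() splits it back.  The helper name is hypothetical, and MSR_EFER
 * and EFER_NXE are assumed to come from <machine/specialreg.h>.
 */
#if 0
static __inline void
example_enable_nxe(void)
{
	u_int64_t efer = rdmsr(MSR_EFER);	/* read the full 64-bit MSR */

	if ((efer & EFER_NXE) == 0)
		wrmsr(MSR_EFER, efer | EFER_NXE);	/* write the modified value back */
}
#endif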
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm __volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm __volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define	read_psl()	read_rflags()
#define	write_psl(x)	write_rflags(x)

#endif /* _KERNEL */

#endif /* !_AMD64_CPUFUNC_H_ */
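/*
 * Illustrative sketch (kept under "#if 0", so it is never compiled and the
 * include guard above is unaffected): the usual way to block interrupts
 * around a short critical section is to save the current RFLAGS with
 * read_psl()/read_rflags(), disable interrupts, and then restore the saved
 * value rather than unconditionally calling enable_intr(), so that callers
 * already running with interrupts disabled are not re-enabled prematurely.
 * The function name below is hypothetical.
 */
#if 0
static __inline void
example_critical_section(void)
{
	u_long rf = read_psl();		/* save RFLAGS, including IF */

	disable_intr();
	/* ... code that must not be interrupted ... */
	write_psl(rf);			/* restore the previous interrupt state */
}
#endif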