/*	$NetBSD: patch.c,v 1.17 2009/04/02 00:19:03 enami Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Patch kernel code at boot time, depending on available CPU features.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.17 2009/04/02 00:19:03 enami Exp $");

#include "opt_lockdebug.h"

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#include <x86/cpuvar.h>
#include <x86/cputypes.h>

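/*
 * The routines named below are implemented elsewhere (in assembly).  Each
 * <name>/<name>_end pair brackets one routine: the _end symbol marks the
 * first byte past it, which is how patchfunc() computes and compares
 * sizes.  The cx8_/i686_/sse2_ variants are replacement templates that
 * get copied over the corresponding generic routines.
 */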
void	spllower(int);
void	spllower_end(void);
void	cx8_spllower(int);
void	cx8_spllower_end(void);
void	cx8_spllower_patch(void);

void	mutex_spin_exit_end(void);
void	i686_mutex_spin_exit(int);
void	i686_mutex_spin_exit_end(void);
void	i686_mutex_spin_exit_patch(void);

void	membar_consumer(void);
void	membar_consumer_end(void);
void	membar_sync(void);
void	membar_sync_end(void);
void	sse2_lfence(void);
void	sse2_lfence_end(void);
void	sse2_mfence(void);
void	sse2_mfence_end(void);

void	_atomic_cas_64(void);
void	_atomic_cas_64_end(void);
void	_atomic_cas_cx8(void);
void	_atomic_cas_cx8_end(void);

extern void	*x86_lockpatch[];
extern void	*atomic_lockpatch[];

#define	X86_NOP		0x90
#define	X86_REP		0xf3
#define	X86_RET		0xc3
#define	X86_CS		0x2e
#define	X86_DS		0x3e
#define	X86_GROUP_0F	0x0f

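/*
 * adjust_jumpoff: fix up the 32-bit pc-relative displacement of a jump or
 * call that has been copied from 'from_s' to 'to_s'.  'ptr' points at the
 * instruction within the original template; the corresponding location in
 * the copy is found and its displacement is incremented by
 * (from_s - to_s), so the copied instruction still transfers control to
 * the same absolute address as the original.
 */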
static void
adjust_jumpoff(uint8_t *ptr, void *from_s, void *to_s)
{

	/* Branch hints */
	if (ptr[0] == X86_CS || ptr[0] == X86_DS)
		ptr++;
	/* Conditional jumps */
	if (ptr[0] == X86_GROUP_0F)
		ptr++;
	/* 4-byte relative jump or call */
	*(uint32_t *)(ptr + 1 - (uintptr_t)from_s + (uintptr_t)to_s) +=
	    ((uint32_t)(uintptr_t)from_s - (uint32_t)(uintptr_t)to_s);
}

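/*
 * patchfunc: copy the replacement routine [from_s, from_e) over the
 * generic routine [to_s, to_e).  Both must be exactly the same size,
 * since the copy is done in place.  If 'pcrel' is non-NULL it points at
 * the one jump/call in the template whose pc-relative displacement has
 * to be re-aimed afterwards (see adjust_jumpoff() above).
 *
 * Worked example with made-up numbers: a call at template offset 0x10
 * with displacement D targets from_s + 0x15 + D.  After the copy, adding
 * (from_s - to_s) to the displacement makes the call at to_s + 0x10 land
 * on to_s + 0x15 + D + (from_s - to_s), i.e. the same absolute address.
 */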
static void __unused
patchfunc(void *from_s, void *from_e, void *to_s, void *to_e,
	  void *pcrel)
{

	if ((uintptr_t)from_e - (uintptr_t)from_s !=
	    (uintptr_t)to_e - (uintptr_t)to_s)
		panic("patchfunc: sizes do not match (from=%p)", from_s);

	memcpy(to_s, from_s, (uintptr_t)to_e - (uintptr_t)to_s);
	if (pcrel != NULL)
		adjust_jumpoff(pcrel, from_s, to_s);

#ifdef GPROF
#ifdef i386
#define	MCOUNT_CALL_OFFSET	3
#endif
#ifdef __x86_64__
#define	MCOUNT_CALL_OFFSET	5
#endif
	/* Patch mcount call offset */
	adjust_jumpoff((uint8_t *)from_s + MCOUNT_CALL_OFFSET, from_s, to_s);
#endif
}

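/*
 * patchbytes: store one or two literal bytes at 'addr'.  A byte2 of -1
 * means only a single byte is written; that is how a one-byte 'lock'
 * prefix is overwritten with a NOP in x86_patch() below.
 */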
static inline void __unused
patchbytes(void *addr, const int byte1, const int byte2)
{

	((uint8_t *)addr)[0] = (uint8_t)byte1;
	if (byte2 != -1)
		((uint8_t *)addr)[1] = (uint8_t)byte2;
}

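/*
 * x86_patch: apply the run-time patches.  The function is meant to run
 * twice, once with 'early' set during early boot and once with 'early'
 * clear later on, after values such as ncpu and the CPU feature mask
 * have settled.  The static flags ensure each pass runs at most once.
 */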
void
x86_patch(bool early)
{
	static bool first, second;
	u_long psl;
	u_long cr0;

	if (early) {
		if (first)
			return;
		first = true;
	} else {
		if (second)
			return;
		second = true;
	}

	/* Disable interrupts. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Disable write protection in supervisor mode. */
	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);

#if !defined(GPROF)
	if (!early && ncpu == 1) {
#ifndef LOCKDEBUG
		int i;

		/* Uniprocessor: kill LOCK prefixes. */
		for (i = 0; x86_lockpatch[i] != 0; i++)
			patchbytes(x86_lockpatch[i], X86_NOP, -1);
		for (i = 0; atomic_lockpatch[i] != 0; i++)
			patchbytes(atomic_lockpatch[i], X86_NOP, -1);
#endif	/* !LOCKDEBUG */
	}
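	/*
	 * lfence/mfence (SSE2) are cheaper than the generic sequences in
	 * membar_consumer()/membar_sync(), so substitute them when the
	 * CPU supports SSE2.
	 */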
	if (!early && (cpu_feature & CPUID_SSE2) != 0) {
		/* Faster memory barriers. */
		patchfunc(
		    sse2_lfence, sse2_lfence_end,
		    membar_consumer, membar_consumer_end,
		    NULL
		);
		patchfunc(
		    sse2_mfence, sse2_mfence_end,
		    membar_sync, membar_sync_end,
		    NULL
		);
	}
#endif	/* GPROF */

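	/*
	 * CPUID_CX8 advertises the cmpxchg8b instruction.  On i386 the
	 * generic _atomic_cas_64() gets replaced with the cmpxchg8b-based
	 * template when the CPU provides it; amd64 does not need this.
	 */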
#ifdef i386
	/*
	 * This is patched both early and late; the second time around,
	 * the 'lock' prefix may already be gone.
	 */
	if ((cpu_feature & CPUID_CX8) != 0) {
		patchfunc(
		    _atomic_cas_cx8, _atomic_cas_cx8_end,
		    _atomic_cas_64, _atomic_cas_64_end,
		    NULL
		);
	}
#endif	/* i386 */

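	/*
	 * cx8_spllower() also relies on cmpxchg8b, hence the CPUID_CX8
	 * check guarding this block.
	 */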
	if (!early && (cpu_feature & CPUID_CX8) != 0) {
		/* Faster splx(), mutex_spin_exit(). */
		patchfunc(
		    cx8_spllower, cx8_spllower_end,
		    spllower, spllower_end,
		    cx8_spllower_patch
		);
#if defined(i386) && !defined(LOCKDEBUG)
		patchfunc(
		    i686_mutex_spin_exit, i686_mutex_spin_exit_end,
		    mutex_spin_exit, mutex_spin_exit_end,
		    i686_mutex_spin_exit_patch
		);
#endif	/* !LOCKDEBUG */
	}

	/* Write back and invalidate cache, flush pipelines. */
	wbinvd();
	x86_flush();

	/* Restore the saved interrupt state. */
	x86_write_psl(psl);

	/* Re-enable write protection. */
	lcr0(cr0);
}
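
#if 0
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how the two passes described above might be driven from hypothetical
 * machine-dependent boot code.
 */
static void
example_patch_sequence(void)
{

	x86_patch(true);	/* early pass */
	/* ... attach remaining CPUs, finalize ncpu and cpu_feature ... */
	x86_patch(false);	/* late pass */
}
#endif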