/*	$NetBSD: patch.c,v 1.8 2007/11/28 15:26:00 ad Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Patch kernel code at boot time, depending on available CPU features.
 */
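
/*
 * Illustrative sketch (not part of the patch tables): on a uniprocessor
 * kernel the single-byte LOCK prefix (0xf0) in front of an atomic
 * read-modify-write such as
 *
 *	f0 01 18	lock addl %ebx,(%eax)
 *
 * is overwritten in place with a single-byte NOP (0x90), yielding
 *
 *	90 01 18	nop; addl %ebx,(%eax)
 *
 * The instruction stream keeps its length, so nothing else has to move.
 * Larger replacements (memory barriers, spllower) are made by copying a
 * complete alternative routine over the generic one; see patchfunc()
 * below.
 */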

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.8 2007/11/28 15:26:00 ad Exp $");

#include "opt_lockdebug.h"

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>

#include <x86/cpuvar.h>
#include <x86/cputypes.h>

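/*
 * Start/end labels for the replacement code sequences.  These routines
 * are implemented in assembly; each foo()/foo_end() pair brackets one
 * contiguous chunk of code, with the _end symbol marking the first byte
 * past the routine so that patchfunc() can compute the chunk's size and
 * copy one routine over another of equal size.
 */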
void	spllower(int);
void	spllower_end(void);
void	cx8_spllower(int);
void	cx8_spllower_end(void);
void	cx8_spllower_patch(void);

void	mutex_spin_exit_end(void);
void	i686_mutex_spin_exit(int);
void	i686_mutex_spin_exit_end(void);
void	i686_mutex_spin_exit_patch(void);

void	membar_consumer(void);
void	membar_consumer_end(void);
void	membar_sync(void);
void	membar_sync_end(void);
void	sse2_lfence(void);
void	sse2_lfence_end(void);
void	sse2_mfence(void);
void	sse2_mfence_end(void);

void	mb_read(void);
void	mb_read_end(void);
void	mb_write(void);
void	mb_write_end(void);
void	mb_memory(void);
void	mb_memory_end(void);
void	x86_mb_nop(void);
void	x86_mb_nop_end(void);
void	sse2_mb_read(void);
void	sse2_mb_read_end(void);
void	sse2_mb_memory(void);
void	sse2_mb_memory_end(void);

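/*
 * Zero-terminated arrays of pointers to LOCK prefix bytes, presumably
 * defined alongside the lock and atomic stubs rather than in this file.
 * On a uniprocessor kernel each listed prefix is overwritten with a NOP
 * by x86_patch() below.
 */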
extern void	*x86_lockpatch[];
extern void	*atomic_lockpatch[];

#define	X86_NOP		0x90	/* single-byte NOP */
#define	X86_REP		0xf3	/* REP prefix */
#define	X86_RET		0xc3	/* near RET */
#define	X86_CS		0x2e	/* CS override; branch-not-taken hint */
#define	X86_DS		0x3e	/* DS override; branch-taken hint */
#define	X86_GROUP_0F	0x0f	/* two-byte opcode escape */

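/*
 * patchfunc() copies the replacement code [from_s, from_e) over the
 * original code [to_s, to_e); both regions must be the same size.  If
 * the replacement contains a jump or call taking a 4-byte PC-relative
 * displacement, `pcrel' points at that instruction within the source.
 * The displacement is then re-biased: the target is "address of the
 * next instruction + displacement", and since the copied instruction
 * now lives at to_s rather than from_s, adding (from_s - to_s) to the
 * stored displacement keeps it pointing at the same absolute target.
 */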
static void __attribute__ ((__unused__))
patchfunc(void *from_s, void *from_e, void *to_s, void *to_e,
	  void *pcrel)
{
	uint8_t *ptr;

	if ((uintptr_t)from_e - (uintptr_t)from_s !=
	    (uintptr_t)to_e - (uintptr_t)to_s)
		panic("patchfunc: sizes do not match (from=%p)", from_s);

	memcpy(to_s, from_s, (uintptr_t)to_e - (uintptr_t)to_s);
	if (pcrel != NULL) {
		ptr = pcrel;
		/* Branch hints */
		if (ptr[0] == X86_CS || ptr[0] == X86_DS)
			ptr++;
		/* Conditional jumps */
		if (ptr[0] == X86_GROUP_0F)
			ptr++;
		/* 4-byte relative jump or call */
		*(uint32_t *)(ptr + 1 - (uintptr_t)from_s + (uintptr_t)to_s) +=
		    ((uint32_t)(uintptr_t)from_s - (uint32_t)(uintptr_t)to_s);
	}
}

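/*
 * patchbytes() rewrites one or two bytes in place; pass -1 as byte2 to
 * patch a single byte.  No size checks are made, so callers must know
 * the instruction layout at `addr'.
 */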
static inline void  __attribute__ ((__unused__))
patchbytes(void *addr, const int byte1, const int byte2)
{

	((uint8_t *)addr)[0] = (uint8_t)byte1;
	if (byte2 != -1)
		((uint8_t *)addr)[1] = (uint8_t)byte2;
}

void
x86_patch(void)
{
#if !defined(GPROF)
	static int again;
	u_long psl;
	u_long cr0;

	if (again)
		return;
	again = 1;

	/* Disable interrupts. */
	psl = x86_read_psl();
	x86_disable_intr();

	/*
	 * Disable write protection in supervisor mode: with CR0_WP
	 * clear, ring 0 stores succeed even on pages mapped read-only,
	 * which may include the kernel text about to be patched.
	 */
	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);

	if (ncpu == 1) {
#ifndef LOCKDEBUG
		int i;

		/* Uniprocessor: kill LOCK prefixes. */
		for (i = 0; x86_lockpatch[i] != 0; i++)
			patchbytes(x86_lockpatch[i], X86_NOP, -1);
		for (i = 0; atomic_lockpatch[i] != 0; i++)
			patchbytes(atomic_lockpatch[i], X86_NOP, -1);
		/* Uniprocessor: kill memory barriers. */
		patchfunc(
			x86_mb_nop, x86_mb_nop_end,
			mb_read, mb_read_end,
			NULL
		);
		patchfunc(
			x86_mb_nop, x86_mb_nop_end,
			mb_write, mb_write_end,
			NULL
		);
		patchfunc(
			x86_mb_nop, x86_mb_nop_end,
			mb_memory, mb_memory_end,
			NULL
		);
		/*
		 * Uniprocessor: kill _kernel_lock()/_kernel_unlock().
		 * Patch in a NOP followed by a RET, then fill another
		 * 14 bytes with NOPs so as not to confuse the decoder.
		 */
		patchbytes(_kernel_lock, X86_NOP, X86_RET);
		patchbytes(_kernel_unlock, X86_NOP, X86_RET);
		for (i = 2; i < 16; i++) {
			patchbytes((char *)_kernel_lock + i, X86_NOP, -1);
			patchbytes((char *)_kernel_unlock + i, X86_NOP, -1);
		}
#endif
	} else if ((cpu_feature & CPUID_SSE2) != 0) {
		/* Faster memory barriers. */
		patchfunc(
		    sse2_mb_read, sse2_mb_read_end,
		    mb_read, mb_read_end,
		    NULL
		);
		patchfunc(
		    sse2_mb_memory, sse2_mb_memory_end,
		    mb_memory, mb_memory_end,
		    NULL
		);
		patchfunc(
		    sse2_lfence, sse2_lfence_end,
		    membar_consumer, membar_consumer_end,
		    NULL
		);
		patchfunc(
		    sse2_mfence, sse2_mfence_end,
		    membar_sync, membar_sync_end,
		    NULL
		);
	}

	if ((cpu_feature & CPUID_CX8) != 0) {
		/* cmpxchg8b available: faster splx(), mutex_spin_exit(). */
		patchfunc(
		    cx8_spllower, cx8_spllower_end,
		    spllower, spllower_end,
		    cx8_spllower_patch
		);
#if defined(i386) && !defined(LOCKDEBUG)
		patchfunc(
		    i686_mutex_spin_exit, i686_mutex_spin_exit_end,
		    mutex_spin_exit, mutex_spin_exit_end,
		    i686_mutex_spin_exit_patch
		);
#endif
	}

	/* Write back and invalidate cache, flush pipelines. */
	wbinvd();
	x86_flush();

	/* Restore the interrupt state saved above. */
	x86_write_psl(psl);

	/* Re-enable write protection. */
	lcr0(cr0);
#endif	/* GPROF */
}