/*	$NetBSD: mtrr_i686.c,v 1.32 2021/10/07 12:52:27 msaitoh Exp $ */

/*-
 * Copyright (c) 2000, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Bill Sommerfeld.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mtrr_i686.c,v 1.32 2021/10/07 12:52:27 msaitoh Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/specialreg.h>
#include <machine/cpuvar.h>
#include <machine/cpufunc.h>
#include <machine/mtrr.h>

extern paddr_t avail_end;

static void i686_mtrr_reload(int);
static void i686_mtrr_init_cpu(struct cpu_info *);
static void i686_mtrr_reload_cpu(struct cpu_info *);
static void i686_mtrr_clean(struct proc *p);
static int i686_mtrr_set(struct mtrr *, int *n, struct proc *p, int flags);
static int i686_mtrr_get(struct mtrr *, int *n, struct proc *p, int flags);
static void i686_mtrr_dump(const char *tag);

static int i686_mtrr_validate(struct mtrr *, struct proc *p);
static void i686_soft2raw(void);
static void i686_raw2soft(void);
static void i686_mtrr_commit(void);
static int i686_mtrr_setone(struct mtrr *, struct proc *p);
static int i686_mtrr_conflict(uint8_t, uint8_t);

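/*
 * Shadow copy of the MTRR-related MSRs: the 16 variable-range
 * base/mask pairs, the 11 fixed-range registers and MTRRdefType.
 * i686_mtrr_init_first() fills in msrval from the boot processor's
 * registers; i686_mtrr_reload() writes the values back to hardware.
 */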
static struct mtrr_state
mtrr_raw[] = {
	{ MSR_MTRRphysBase0, 0 },
	{ MSR_MTRRphysMask0, 0 },
	{ MSR_MTRRphysBase1, 0 },
	{ MSR_MTRRphysMask1, 0 },
	{ MSR_MTRRphysBase2, 0 },
	{ MSR_MTRRphysMask2, 0 },
	{ MSR_MTRRphysBase3, 0 },
	{ MSR_MTRRphysMask3, 0 },
	{ MSR_MTRRphysBase4, 0 },
	{ MSR_MTRRphysMask4, 0 },
	{ MSR_MTRRphysBase5, 0 },
	{ MSR_MTRRphysMask5, 0 },
	{ MSR_MTRRphysBase6, 0 },
	{ MSR_MTRRphysMask6, 0 },
	{ MSR_MTRRphysBase7, 0 },
	{ MSR_MTRRphysMask7, 0 },
	{ MSR_MTRRphysBase8, 0 },
	{ MSR_MTRRphysMask8, 0 },
	{ MSR_MTRRphysBase9, 0 },
	{ MSR_MTRRphysMask9, 0 },
	{ MSR_MTRRphysBase10, 0 },
	{ MSR_MTRRphysMask10, 0 },
	{ MSR_MTRRphysBase11, 0 },
	{ MSR_MTRRphysMask11, 0 },
	{ MSR_MTRRphysBase12, 0 },
	{ MSR_MTRRphysMask12, 0 },
	{ MSR_MTRRphysBase13, 0 },
	{ MSR_MTRRphysMask13, 0 },
	{ MSR_MTRRphysBase14, 0 },
	{ MSR_MTRRphysMask14, 0 },
	{ MSR_MTRRphysBase15, 0 },
	{ MSR_MTRRphysMask15, 0 },
	{ MSR_MTRRfix64K_00000, 0 },
	{ MSR_MTRRfix16K_80000, 0 },
	{ MSR_MTRRfix16K_A0000, 0 },
	{ MSR_MTRRfix4K_C0000, 0 },
	{ MSR_MTRRfix4K_C8000, 0 },
	{ MSR_MTRRfix4K_D0000, 0 },
	{ MSR_MTRRfix4K_D8000, 0 },
	{ MSR_MTRRfix4K_E0000, 0 },
	{ MSR_MTRRfix4K_E8000, 0 },
	{ MSR_MTRRfix4K_F0000, 0 },
	{ MSR_MTRRfix4K_F8000, 0 },
	{ MSR_MTRRdefType, 0 },
};

static const int nmtrr_raw = __arraycount(mtrr_raw);
static int i686_mtrr_vcnt = 0;

static struct mtrr_state *mtrr_var_raw;
static struct mtrr_state *mtrr_fixed_raw;

static struct mtrr *mtrr_fixed;
static struct mtrr *mtrr_var;

const struct mtrr_funcs i686_mtrr_funcs = {
	i686_mtrr_init_cpu,
	i686_mtrr_reload_cpu,
	i686_mtrr_clean,
	i686_mtrr_set,
	i686_mtrr_get,
	i686_mtrr_commit,
	i686_mtrr_dump
};

static kcpuset_t *		mtrr_waiting;

static uint64_t			i686_mtrr_cap;

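/*
 * Print the current hardware contents of all known MTRR-related MSRs,
 * one per line, prefixed with the given tag.
 */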
static void
i686_mtrr_dump(const char *tag)
{
	int i;

	for (i = 0; i < nmtrr_raw; i++)
		printf("%s: %x: %016llx\n",
		    tag, mtrr_raw[i].msraddr,
		    (unsigned long long)rdmsr(mtrr_raw[i].msraddr));
}

/*
 * The Intel Architecture Software Developer's Manual volume 3 (systems
 * programming) section 9.12.8 describes a simple 15-step process for
 * updating the MTRRs on all processors of a multiprocessor system.
 * If synch is nonzero, assume we're being called from an IPI handler,
 * and synchronize with all running processors.
 */

/*
 * 1. Broadcast to all processors to execute the following code sequence.
 */

static void
i686_mtrr_reload(int synch)
{
	int i;
	/* XXX cr0 is 64-bit on amd64 too, but the upper bits are
	 * unused and must be zero so it does not matter too
	 * much. Need to change the prototypes of l/rcr0 too if you
	 * want to correct it. */
	uint32_t cr0;
	vaddr_t cr4;
	uint32_t origcr0;
	vaddr_t origcr4;

	/*
	 * 2. Disable interrupts.
	 */
	x86_disable_intr();

#ifdef MULTIPROCESSOR
	if (synch) {
		/*
		 * 3. Wait for all processors to reach this point.
		 */
		kcpuset_atomic_set(mtrr_waiting, cpu_index(curcpu()));
		while (!kcpuset_match(mtrr_waiting, kcpuset_running)) {
			DELAY(10);
		}
	}
#endif

	/*
	 * 4. Enter the no-fill cache mode (set the CD flag in CR0 to 1 and
	 * the NW flag to 0).
	 */

	origcr0 = cr0 = rcr0();
	cr0 |= CR0_CD;
	cr0 &= ~CR0_NW;
	lcr0(cr0);

	/*
	 * 5. Flush all caches using the WBINVD instruction.
	 */

	wbinvd();

	/*
	 * 6. Clear the PGE flag in control register CR4 (if set).
	 */

	origcr4 = cr4 = rcr4();
	cr4 &= ~CR4_PGE;
	lcr4(cr4);

	/*
	 * 7. Flush all TLBs (execute a MOV from control register CR3
	 * to another register and then a move from that register back
	 * to CR3).
	 */

	tlbflush();

	/*
	 * 8. Disable all range registers (by clearing the E flag in
	 * register MTRRdefType).  If only variable ranges are being
	 * modified, software may clear the valid bits for the
	 * affected register pairs instead.
	 */
	/* disable MTRRs (E = 0) */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_I686_ENABLE_MASK);

	/*
	 * 9. Update the MTRRs.
	 */

	for (i = 0; i < nmtrr_raw; i++) {
		uint64_t val = mtrr_raw[i].msrval;
		uint32_t addr = mtrr_raw[i].msraddr;
		if (addr == 0)
			continue;
		if (addr == MSR_MTRRdefType)
			val &= ~MTRR_I686_ENABLE_MASK;
		wrmsr(addr, val);
	}

	/*
	 * 10. Enable all range registers (by setting the E flag in
	 * register MTRRdefType).  If only variable-range registers
	 * were modified and their individual valid bits were cleared,
	 * then set the valid bits for the affected ranges instead.
	 */

	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_I686_ENABLE_MASK);

	/*
	 * 11. Flush all caches and all TLBs a second time (repeat
	 * steps 5 and 7).
	 */

	wbinvd();
	tlbflush();

	/*
	 * 12. Enter the normal cache mode to re-enable caching (set the CD
	 * and NW flags in CR0 to 0).
	 */

	lcr0(origcr0);

	/*
	 * 13. Set the PGE flag in control register CR4, if previously
	 * cleared.
	 */

	lcr4(origcr4);

#ifdef MULTIPROCESSOR
	if (synch) {
		/*
		 * 14. Wait for all processors to reach this point.
		 */
		kcpuset_atomic_clear(mtrr_waiting, cpu_index(curcpu()));
		while (!kcpuset_iszero(mtrr_waiting)) {
			DELAY(10);
		}
	}
#endif

	/*
	 * 15. Enable interrupts.
	 */
	x86_enable_intr();
}

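/*
 * Reload this CPU's MTRRs from the shadow state, synchronizing with
 * the other running processors; installed as the reload hook in
 * i686_mtrr_funcs.
 */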
static void
i686_mtrr_reload_cpu(struct cpu_info *ci)
{
	i686_mtrr_reload(1);
}

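/*
 * Called once on the boot processor: read MTRRcap to learn how many
 * variable ranges the CPU implements, snapshot the current MSR values
 * into mtrr_raw, allocate the software range arrays and install the
 * i686 function vector.
 */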
void
i686_mtrr_init_first(void)
{
	int i;

	i686_mtrr_cap = rdmsr(MSR_MTRRcap);
	i686_mtrr_vcnt = i686_mtrr_cap & MTRR_I686_CAP_VCNT_MASK;

	if (i686_mtrr_vcnt > MTRR_I686_NVAR_MAX)
		printf("%s: FIXME: more than %d MTRRs (%d)\n", __FILE__,
		    MTRR_I686_NVAR_MAX, i686_mtrr_vcnt);
	else if (i686_mtrr_vcnt < MTRR_I686_NVAR_MAX) {
		for (i = MTRR_I686_NVAR_MAX - i686_mtrr_vcnt; i; i--) {
			mtrr_raw[(MTRR_I686_NVAR_MAX - i) * 2].msraddr = 0;
			mtrr_raw[(MTRR_I686_NVAR_MAX - i) * 2 + 1].msraddr = 0;
		}
	}

	for (i = 0; i < nmtrr_raw; i++) {
		if (mtrr_raw[i].msraddr)
			mtrr_raw[i].msrval = rdmsr(mtrr_raw[i].msraddr);
		else
			mtrr_raw[i].msrval = 0;
	}
#if 0
	mtrr_dump("init mtrr");
#endif

	kcpuset_create(&mtrr_waiting, true);

	mtrr_fixed =
	    kmem_zalloc(MTRR_I686_NFIXED_SOFT * sizeof(struct mtrr), KM_SLEEP);

	if (i686_mtrr_vcnt) {
		mtrr_var =
		    kmem_zalloc(i686_mtrr_vcnt * sizeof(struct mtrr), KM_SLEEP);
	}

	mtrr_var_raw = &mtrr_raw[0];
	mtrr_fixed_raw = &mtrr_raw[MTRR_I686_NVAR_MAX * 2];
	mtrr_funcs = &i686_mtrr_funcs;

	i686_raw2soft();
}

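/*
 * Convert the raw MSR shadow values into the software mtrr_var and
 * mtrr_fixed arrays.  Each fixed-range MSR packs eight one-byte
 * memory types, which are unpacked into eight struct mtrr entries.
 */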
static void
i686_raw2soft(void)
{
	int i, j, idx;
	struct mtrr *mtrrp;
	uint64_t base, mask;

	for (i = 0; i < i686_mtrr_vcnt; i++) {
		mtrrp = &mtrr_var[i];
		memset(mtrrp, 0, sizeof *mtrrp);
		mask = mtrr_var_raw[i * 2 + 1].msrval;
		if (!mtrr_valid(mask))
			continue;
		base = mtrr_var_raw[i * 2].msrval;
		mtrrp->base = mtrr_base(base);
		mtrrp->type = mtrr_type(base);
		mtrrp->len = mtrr_len(mask);
		mtrrp->flags |= MTRR_VALID;
	}

	idx = 0;
	base = 0;
	for (i = 0; i < MTRR_I686_NFIXED_64K; i++, idx++) {
		mask = mtrr_fixed_raw[idx].msrval;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			mtrrp->owner = 0;
			mtrrp->flags = MTRR_FIXED | MTRR_VALID;
			mtrrp->base = base;
			mtrrp->len = 65536;
			mtrrp->type = mask & 0xff;
			mask >>= 8;
			base += 65536;
		}
	}

	for (i = 0; i < MTRR_I686_NFIXED_16K; i++, idx++) {
		mask = mtrr_fixed_raw[idx].msrval;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			mtrrp->owner = 0;
			mtrrp->flags = MTRR_FIXED | MTRR_VALID;
			mtrrp->base = base;
			mtrrp->len = 16384;
			mtrrp->type = mask & 0xff;
			mask >>= 8;
			base += 16384;
		}
	}

	for (i = 0; i < MTRR_I686_NFIXED_4K; i++, idx++) {
		mask = mtrr_fixed_raw[idx].msrval;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			mtrrp->owner = 0;
			mtrrp->flags = MTRR_FIXED | MTRR_VALID;
			mtrrp->base = base;
			mtrrp->len = 4096;
			mtrrp->type = mask & 0xff;
			mask >>= 8;
			base += 4096;
		}
	}
}

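/*
 * Convert the software mtrr_var and mtrr_fixed arrays back into raw
 * MSR values, repacking eight one-byte memory types per fixed-range
 * MSR and setting the valid bit on in-use variable ranges.
 */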
static void
i686_soft2raw(void)
{
	int i, idx, j;
	uint64_t val;
	struct mtrr *mtrrp;

	for (i = 0; i < i686_mtrr_vcnt; i++) {
		mtrrp = &mtrr_var[i];
		mtrr_var_raw[i * 2].msrval = mtrr_base_value(mtrrp);
		mtrr_var_raw[i * 2 + 1].msrval = mtrr_mask_value(mtrrp);
		if (mtrrp->flags & MTRR_VALID)
			mtrr_var_raw[i * 2 + 1].msrval |= MTRR_I686_MASK_VALID;
	}

	idx = 0;
	for (i = 0; i < MTRR_I686_NFIXED_64K; i++, idx++) {
		val = 0;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			val |= ((uint64_t)mtrrp->type << (j << 3));
		}
		mtrr_fixed_raw[idx].msrval = val;
	}

	for (i = 0; i < MTRR_I686_NFIXED_16K; i++, idx++) {
		val = 0;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			val |= ((uint64_t)mtrrp->type << (j << 3));
		}
		mtrr_fixed_raw[idx].msrval = val;
	}

	for (i = 0; i < MTRR_I686_NFIXED_4K; i++, idx++) {
		val = 0;
		for (j = 0; j < 8; j++) {
			mtrrp = &mtrr_fixed[idx * 8 + j];
			val |= ((uint64_t)mtrrp->type << (j << 3));
		}
		mtrr_fixed_raw[idx].msrval = val;
	}
}

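/*
 * Load the shadow MTRR state into a CPU as it comes up; no
 * synchronization with other processors is performed.
 */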
static void
i686_mtrr_init_cpu(struct cpu_info *ci)
{
	i686_mtrr_reload(0);
#if 0
	mtrr_dump(device_xname(ci->ci_dev));
#endif
}

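/*
 * Sanity-check a requested range: alignment, memory type, ownership
 * and, for fixed ranges, the coarser alignment that the fixed-range
 * registers can express.  Returns 0 or an errno value.
 */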
static int
i686_mtrr_validate(struct mtrr *mtrrp, struct proc *p)
{
	uint64_t high;

	/*
	 * Must be at least page-aligned.
	 */
	if (mtrrp->base & 0xfff || mtrrp->len & 0xfff || mtrrp->len == 0)
		return EINVAL;

	/*
	 * Private mappings are bound to a process.
	 */
	if (p == NULL && (mtrrp->flags & MTRR_PRIVATE))
		return EINVAL;

	high = mtrrp->base + mtrrp->len;

	/*
	 * Check for bad types.
	 */
	if ((mtrrp->type == MTRR_TYPE_UNDEF1 || mtrrp->type == MTRR_TYPE_UNDEF2
	    || mtrrp->type > MTRR_TYPE_WB) && (mtrrp->flags & MTRR_VALID))
		return EINVAL;

	/*
	 * If write-combining is requested, make sure that the WC feature
	 * is supported by the processor.
	 */
	if (mtrrp->type == MTRR_TYPE_WC &&
	    !(i686_mtrr_cap & MTRR_I686_CAP_WC_MASK))
		return ENODEV;

	/*
	 * Only use fixed ranges < 1M.
	 */
	if ((mtrrp->flags & MTRR_FIXED) && high > 0x100000)
		return EINVAL;

	/*
	 * Check for the right alignment and size for fixed ranges.
	 * The requested range may span several actual MTRRs, but
	 * it must be properly aligned.
	 */
	if (mtrrp->flags & MTRR_FIXED) {
		if (mtrrp->base < MTRR_I686_16K_START) {
			if ((mtrrp->base & 0xffff) != 0)
				return EINVAL;
		} else if (mtrrp->base < MTRR_I686_4K_START) {
			if ((mtrrp->base & 0x3fff) != 0)
				return EINVAL;
		} else {
			if ((mtrrp->base & 0xfff) != 0)
				return EINVAL;
		}

		if (high < MTRR_I686_16K_START) {
			if ((high & 0xffff) != 0)
				return EINVAL;
		} else if (high < MTRR_I686_4K_START) {
			if ((high & 0x3fff) != 0)
				return EINVAL;
		} else {
			if ((high & 0xfff) != 0)
				return EINVAL;
		}
	}

	return 0;
}

/*
 * Try to find a non-conflicting match on physical MTRRs for the
 * requested range.  For fixed ranges, more than one actual MTRR
 * may be used.
 */
static int
i686_mtrr_setone(struct mtrr *mtrrp, struct proc *p)
{
	int i, error;
	struct mtrr *lowp, *highp, *mp, *freep;
	uint64_t low, high, curlow, curhigh;

	/*
	 * If explicitly requested, or if the range lies below 1M,
	 * try the fixed range MTRRs.
	 */
	if (mtrrp->flags & MTRR_FIXED ||
	    (mtrrp->base + mtrrp->len) <= 0x100000) {
		lowp = highp = NULL;
		for (i = 0; i < MTRR_I686_NFIXED_SOFT; i++) {
			if (mtrr_fixed[i].base == mtrrp->base + mtrrp->len) {
				highp = &mtrr_fixed[i];
				break;
			}
			if (mtrr_fixed[i].base == mtrrp->base) {
				lowp = &mtrr_fixed[i];
				/*
				 * If the requested upper bound is the 1M
				 * limit, search no further.
				 */
				if ((mtrrp->base + mtrrp->len) == 0x100000) {
					highp =
					    &mtrr_fixed[MTRR_I686_NFIXED_SOFT];
					break;
				} else {
					highp = &mtrr_fixed[i + 1];
					continue;
				}
			}
		}
		if (lowp == NULL || highp == NULL)
			panic("mtrr: fixed register screwup");
		error = 0;
		for (mp = lowp; mp < highp; mp++) {
			if ((mp->flags & MTRR_PRIVATE) && p != NULL
			     && p->p_pid != mp->owner) {
				error = EBUSY;
				break;
			}
		}
		if (error != 0) {
			if (mtrrp->flags & MTRR_FIXED)
				return error;
		} else {
			for (mp = lowp; mp < highp; mp++) {
				/*
				 * Can't invalidate fixed ranges, so
				 * just reset the 'private' flag,
				 * making the range available for
				 * changing again.
				 */
				if (!(mtrrp->flags & MTRR_VALID)) {
					mp->flags &= ~MTRR_PRIVATE;
					continue;
				}
				mp->type = mtrrp->type;
				if (mtrrp->flags & MTRR_PRIVATE) {
					/*
					 * Private mappings are bound to a
					 * process.  This has been checked in
					 * i686_mtrr_validate().
					 */
					mp->flags |= MTRR_PRIVATE;
					mp->owner = p->p_pid;
				}
			}
			return 0;
		}
	}

	/*
	 * Try one of the variable range registers.
	 * XXX could be more sophisticated here by merging ranges.
	 */
	low = mtrrp->base;
	high = low + mtrrp->len - 1;
	freep = NULL;
	for (i = 0; i < i686_mtrr_vcnt; i++) {
		if (!(mtrr_var[i].flags & MTRR_VALID)) {
			freep = &mtrr_var[i];
			continue;
		}
		curlow = mtrr_var[i].base;
		curhigh = curlow + mtrr_var[i].len - 1;
		if (low == curlow && high == curhigh &&
		    (!(mtrr_var[i].flags & MTRR_PRIVATE) ||
		     ((mtrrp->flags & MTRR_PRIVATE) && (p != NULL) &&
		      (mtrr_var[i].owner == p->p_pid)))) {
			freep = &mtrr_var[i];
			break;
		}
		if (((high >= curlow && high < curhigh) ||
		    (low >= curlow && low < curhigh)) &&
		    (i686_mtrr_conflict(mtrr_var[i].type, mtrrp->type) ||
		     ((mtrr_var[i].flags & MTRR_PRIVATE) &&
		      (!(mtrrp->flags & MTRR_PRIVATE) || (p == NULL) ||
		       (mtrr_var[i].owner != p->p_pid))))) {
			return EBUSY;
		}
	}
	if (freep == NULL)
		return EBUSY;
	mtrrp->flags &= ~MTRR_CANTSET;
	*freep = *mtrrp;
	freep->owner = (mtrrp->flags & MTRR_PRIVATE) ? p->p_pid : 0;

	return 0;
}

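/*
 * Decide whether two memory types may overlap: UC combined with
 * anything, and WT combined with WB, do not conflict; all other
 * combinations do.
 */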
static int
i686_mtrr_conflict(uint8_t type1, uint8_t type2)
{
	if (type1 == MTRR_TYPE_UC || type2 == MTRR_TYPE_UC)
		return 0;
	if ((type1 == MTRR_TYPE_WT && type2 == MTRR_TYPE_WB) ||
	    (type1 == MTRR_TYPE_WB && type2 == MTRR_TYPE_WT))
		return 0;
	return 1;
}

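/*
 * Release all private mappings owned by the given process: clear the
 * private flag on its fixed ranges, invalidate its variable ranges,
 * and commit the result to the hardware.
 */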
static void
i686_mtrr_clean(struct proc *p)
{
	int i;

	for (i = 0; i < MTRR_I686_NFIXED_SOFT; i++) {
		if ((mtrr_fixed[i].flags & MTRR_PRIVATE) &&
		    (mtrr_fixed[i].owner == p->p_pid))
			mtrr_fixed[i].flags &= ~MTRR_PRIVATE;
	}

	for (i = 0; i < i686_mtrr_vcnt; i++) {
		if ((mtrr_var[i].flags & MTRR_PRIVATE) &&
		    (mtrr_var[i].owner == p->p_pid))
			mtrr_var[i].flags &= ~(MTRR_PRIVATE | MTRR_VALID);
	}

	i686_mtrr_commit();
}

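/*
 * Install up to *n ranges from the array mtrrp, which lives in user
 * space if MTRR_GETSET_USER is set.  Each range is validated and
 * placed individually; on return *n holds the number of ranges
 * actually processed.
 */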
static int
i686_mtrr_set(struct mtrr *mtrrp, int *n, struct proc *p, int flags)
{
	int i, error;
	struct mtrr mtrr;

	if (*n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX)) {
		*n = 0;
		return EINVAL;
	}

	error = 0;
	for (i = 0; i < *n; i++) {
		if (flags & MTRR_GETSET_USER) {
			error = copyin(&mtrrp[i], &mtrr, sizeof mtrr);
			if (error != 0)
				break;
		} else
			mtrr = mtrrp[i];
		error = i686_mtrr_validate(&mtrr, p);
		if (error != 0)
			break;
		error = i686_mtrr_setone(&mtrr, p);
		if (error != 0)
			break;
		if (mtrr.flags & MTRR_PRIVATE)
			p->p_md.md_flags |= MDP_USEDMTRR;
	}
	*n = i;
	return error;
}

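/*
 * Copy out up to *n of the current software ranges, fixed ranges
 * first.  A NULL mtrrp just reports the total number of ranges
 * available in *n.
 */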
static int
i686_mtrr_get(struct mtrr *mtrrp, int *n, struct proc *p, int flags)
{
	int idx, i, error;

	if (mtrrp == NULL) {
		*n = MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX;
		return 0;
	}

	error = 0;

	for (idx = i = 0; i < MTRR_I686_NFIXED_SOFT && idx < *n; idx++, i++) {
		if (flags & MTRR_GETSET_USER) {
			error = copyout(&mtrr_fixed[i], &mtrrp[idx],
					sizeof *mtrrp);
			if (error != 0)
				break;
		} else
			memcpy(&mtrrp[idx], &mtrr_fixed[i], sizeof *mtrrp);
	}
	if (error != 0) {
		*n = idx;
		return error;
	}

	for (i = 0; i < i686_mtrr_vcnt && idx < *n; idx++, i++) {
		if (flags & MTRR_GETSET_USER) {
			error = copyout(&mtrr_var[i], &mtrrp[idx],
					sizeof *mtrrp);
			if (error != 0)
				break;
		} else
			memcpy(&mtrrp[idx], &mtrr_var[i], sizeof *mtrrp);
	}
	*n = idx;
	return error;
}

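/*
 * Push the software state out to the raw MSR shadow and reload it on
 * every CPU, using an IPI to rendezvous with the others on MP kernels.
 */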
static void
i686_mtrr_commit(void)
{

	i686_soft2raw();
	kpreempt_disable();
#ifdef MULTIPROCESSOR
	x86_broadcast_ipi(X86_IPI_MTRR);
#endif
	i686_mtrr_reload(1);
	kpreempt_enable();
}
772