/* $OpenBSD: i686_mem.c,v 1.20 2023/01/30 10:49:05 jsg Exp $ */
/*
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/i686_mem.c,v 1.8 1999/10/12 22:53:05 green Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>

/*
 * This code manages a set of MSRs known as MTRRs, which define
 * caching modes/behavior for various memory ranges.
 */
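
/*
 * Background sketch (summarising the Intel SDM, not code from this
 * file): MTRRcap (MSR 0xfe) reports the number of variable ranges and
 * whether fixed ranges exist; MTRRdefType (MSR 0x2ff) holds the global
 * enable bits and the default memory type.  Each variable range is a
 * base/mask MSR pair: the base MSR carries the physical base with the
 * memory type in its low byte, and the mask MSR carries a length mask
 * plus a valid bit (bit 11, the 0x800 tested throughout this file).
 */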

char *mem_owner_bios = "BIOS";

#define MR_FIXMTRR	(1<<0)

#define mrwithin(mr, a) \
    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb) \
    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define mrvalid(base, len) 						\
    ((!(base & ((1 << 12) - 1))) && 	/* base is multiple of 4k */	\
     ((len) >= (1 << 12)) && 		/* length is >= 4k */		\
     powerof2((len)) && 		/* ... and power of two */	\
     !((base) & ((len) - 1)))		/* range is not discontiguous */
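
/*
 * Worked example (illustrative values, not from this file): base
 * 0xd0000000 with len 0x100000 satisfies all four tests, while base
 * 0xd0080000 with the same len fails the last one, since
 * 0xd0080000 & 0xfffff == 0x80000.
 */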

#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | \
	((new) & MDF_ATTRMASK))

#define FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + \
	(MTRR_N4K * 0x1000))
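
/*
 * With the usual x86 constants (MTRR_N64K == 8, MTRR_N16K == 16,
 * MTRR_N4K == 64), FIXTOP works out to 0x80000 + 0x40000 + 0x40000 ==
 * 0x100000, i.e. the fixed MTRRs cover exactly the low 1MB.
 */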
63 
64 void	mrinit(struct mem_range_softc *sc);
65 int	mrset(struct mem_range_softc *sc,
66 	    struct mem_range_desc *mrd, int *arg);
67 void	mrinit_cpu(struct mem_range_softc *sc);
68 void	mrreload_cpu(struct mem_range_softc *sc);
69 
70 struct mem_range_ops mrops = {
71 	mrinit,
72 	mrset,
73 	mrinit_cpu,
74 	mrreload_cpu
75 };
76 
77 u_int64_t	mtrrcap, mtrrdef;
78 u_int64_t	mtrrmask = 0x0000000ffffff000ULL;
79 
80 struct mem_range_desc	*mem_range_match(struct mem_range_softc *sc,
81 			     struct mem_range_desc *mrd);
82 void			 mrfetch(struct mem_range_softc *sc);
83 int			 mtrrtype(u_int64_t flags);
84 int			 mrt2mtrr(u_int64_t flags);
85 int			 mtrr2mrt(int val);
86 int			 mtrrconflict(u_int64_t flag1, u_int64_t flag2);
87 void			 mrstore(struct mem_range_softc *sc);
88 void			 mrstoreone(struct mem_range_softc *sc);
89 struct mem_range_desc	*mtrrfixsearch(struct mem_range_softc *sc,
90 			     u_int64_t addr);
91 int			 mrsetlow(struct mem_range_softc *sc,
92 			     struct mem_range_desc *mrd, int *arg);
93 int			 mrsetvariable(struct mem_range_softc *sc,
94 			     struct mem_range_desc *mrd, int *arg);
95 
96 /* MTRR type to memory range type conversion */
97 int mtrrtomrt[] = {
98 	MDF_UNCACHEABLE,
99 	MDF_WRITECOMBINE,
100 	MDF_UNKNOWN,
101 	MDF_UNKNOWN,
102 	MDF_WRITETHROUGH,
103 	MDF_WRITEPROTECT,
104 	MDF_WRITEBACK
105 };

int
mtrr2mrt(int val)
{
	if (val < 0 || val >= nitems(mtrrtomrt))
		return MDF_UNKNOWN;
	return mtrrtomrt[val];
}

/*
 * MTRR conflicts.  Writeback and uncacheable ranges may overlap,
 * since the CPU resolves that pairing in favor of the uncacheable
 * type; any other combination of differing types conflicts.
 */
int
mtrrconflict(u_int64_t flag1, u_int64_t flag2)
{
	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return 0;
	return 1;
}

/*
 * Look for an exactly-matching range.
 */
struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc	*cand;
	int			 i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return(cand);
	return(NULL);
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to all
 * be in sync in the SMP case).  Note that if we are here, we assume
 * that MTRRs are enabled, and we may or may not have fixed MTRRs.
 */
void
mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc	*mrd;
	u_int64_t		 msrv;
	int			 i, j, msr, mrt;

	mrd = sc->mr_desc;

	/* We should never be fetching MTRRs from an AP */
	KASSERT(CPU_IS_PRIMARY(curcpu()));

	/* Get fixed-range MTRRs, if the CPU supports them */
	if (sc->mr_cap & MR_FIXMTRR) {
		msr = MSR_MTRRfix64K_00000;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrt = mtrr2mrt(msrv & 0xff);
				if (mrt == MDF_UNKNOWN)
					mrt = MDF_UNCACHEABLE;
				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
					mrt | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strlcpy(mrd->mr_owner, mem_owner_bios,
					    sizeof(mrd->mr_owner));
				msrv = msrv >> 8;
			}
		}

		msr = MSR_MTRRfix16K_80000;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrt = mtrr2mrt(msrv & 0xff);
				if (mrt == MDF_UNKNOWN)
					mrt = MDF_UNCACHEABLE;
				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
					mrt | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strlcpy(mrd->mr_owner, mem_owner_bios,
					    sizeof(mrd->mr_owner));
				msrv = msrv >> 8;
			}
		}

		msr = MSR_MTRRfix4K_C0000;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrt = mtrr2mrt(msrv & 0xff);
				if (mrt == MDF_UNKNOWN)
					mrt = MDF_UNCACHEABLE;
				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
					mrt | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strlcpy(mrd->mr_owner, mem_owner_bios,
					    sizeof(mrd->mr_owner));
				msrv = msrv >> 8;
			}
		}
	}

	/* Get remainder which must be variable MTRRs */
	msr = MSR_MTRRvarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrt = mtrr2mrt(msrv & 0xff);
		if (mrt == MDF_UNKNOWN)
			mrt = MDF_UNCACHEABLE;
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | mrt;
		mrd->mr_base = msrv & mtrrmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & 0x800) ?
			(mrd->mr_flags | MDF_ACTIVE) :
			(mrd->mr_flags & ~MDF_ACTIVE);
		/* Compute the range from the mask. Ick. */
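		/*
		 * e.g. (illustrative): a 128KB range has mask bits
		 * ~(0x20000 - 1) & mtrrmask == 0xffffe0000; inverting
		 * those within mtrrmask gives 0x1f000, and adding
		 * 0x1000 recovers the 0x20000 length.
		 */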
		mrd->mr_len = (~(msrv & mtrrmask) & mtrrmask) + 0x1000;
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;
		/* If unclaimed and active, must be the BIOS */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strlcpy(mrd->mr_owner, mem_owner_bios,
			    sizeof(mrd->mr_owner));
	}
}

/*
 * Return the MTRR memory type matching a region's flags, or
 * MDF_UNCACHEABLE if the flags match no known type.  (Note that
 * mrset() compares this result against -1, which can never be
 * returned here.)
 */
int
mtrrtype(u_int64_t flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < nitems(mtrrtomrt); i++) {
		if (mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == mtrrtomrt[i])
			return(i);
	}
	return MDF_UNCACHEABLE;
}

int
mrt2mtrr(u_int64_t flags)
{
	int val;

	val = mtrrtype(flags);

	return val & 0xff;
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
void
mrstore(struct mem_range_softc *sc)
{
	u_long s;

	s = intr_disable();			/* disable interrupts */
#ifdef MULTIPROCESSOR
	i386_broadcast_ipi(I386_IPI_MTRR);
#endif
	mrstoreone(sc);
	intr_restore(s);
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than
 * just stuffing one entry; this is simpler (but slower, of course).
 */
void
mrstoreone(struct mem_range_softc *sc)
{
	struct mem_range_desc	*mrd;
	u_int64_t		 msrv;
	int			 i, j, msr;
	u_int			 cr4save;

	mrd = sc->mr_desc;

	cr4save = rcr4();	/* save cr4 */
	if (cr4save & CR4_PGE)
		lcr4(cr4save & ~CR4_PGE);
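	/*
	 * Clearing CR4.PGE above (when it was set) also flushes the
	 * TLB, which the documented MTRR update sequence requires.
	 */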

	/* Flush caches, then disable caches, then disable MTRRs */
	wbinvd();
	lcr0((rcr0() & ~CR0_NW) | CR0_CD);
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRRdefType_ENABLE);

	/* Set fixed-range MTRRs */
	if (sc->mr_cap & MR_FIXMTRR) {
		msr = MSR_MTRRfix64K_00000;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = 0;
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= mrt2mtrr((mrd + j)->mr_flags);
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}

		msr = MSR_MTRRfix16K_80000;
		for (i = 0, msrv = 0; i < (MTRR_N16K / 8); i++, msr++) {
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= mrt2mtrr((mrd + j)->mr_flags);
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}

		msr = MSR_MTRRfix4K_C0000;
		for (i = 0, msrv = 0; i < (MTRR_N4K / 8); i++, msr++) {
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= mrt2mtrr((mrd + j)->mr_flags);
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs */
	msr = MSR_MTRRvarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrrmask;
			msrv |= mrt2mtrr(mrd->mr_flags);
		} else
			msrv = 0;

		wrmsr(msr, msrv);

		/* mask/active register */
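		/*
		 * e.g. (illustrative): an active 128KB range is encoded
		 * as 0x800 | (~(0x20000 - 1) & mtrrmask), i.e. the
		 * valid bit plus mask bits 0xffffe0000 under the
		 * default 36-bit mtrrmask.
		 */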
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = 0x800 | (~(mrd->mr_len - 1) & mtrrmask);
		} else
			msrv = 0;

		wrmsr(msr + 1, msrv);
	}

	/* Re-enable caches and MTRRs */
	wrmsr(MSR_MTRRdefType, mtrrdef | MTRRdefType_ENABLE);
	lcr0(rcr0() & ~(CR0_CD | CR0_NW));
	lcr4(cr4save);
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
struct mem_range_desc *
mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int			i;

	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
		if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))
			return(mrd);
	return(NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed MTRRs that
 * cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to the
 * next higher/lower boundary to avoid the consumer having to know too much
 * about the mechanisms here.
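 *
 * e.g. (illustrative): a request for 0x9c000-0x9ffff lands on exactly one
 * 16K fixed descriptor, while a request starting or ending inside a
 * descriptor is widened to that descriptor's full extent.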
 */
int
mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc	*first_md, *last_md, *curr_md;

	/* range check */
	if (((first_md = mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	    ((last_md = mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
		return(EINVAL);

	/* check we aren't doing something risky */
	if (!(mrd->mr_flags & MDF_FORCE))
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}

	/* set flags, clear set-by-firmware flag */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return(0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 */
int
mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc	*curr_md, *free_md;
	int			 i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 * Keep track of the first empty variable descriptor in case we
	 * can't perform a takeover.
	 */
	i = (sc->mr_cap & MR_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* exact match? */
			if ((curr_md->mr_base == mrd->mr_base) &&
			    (curr_md->mr_len == mrd->mr_len)) {
				/* check we aren't doing something risky */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    ((curr_md->mr_flags & MDF_ATTRMASK)
				    == MDF_UNKNOWN))
					return (EACCES);
				/* Ok, just hijack this entry */
				free_md = curr_md;
				break;
			}
			/* non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* between conflicting region types? */
				if (mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return(EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}
	/* got somewhere to put it? */
	if (free_md == NULL)
		return(ENOSPC);

	/* Set up new descriptor */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return(0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
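/*
 * Illustrative (hypothetical) caller sketch, assuming the usual global
 * mem_range_softc and a driver mapping a 16MB frame buffer:
 *
 *	struct mem_range_desc mrd;
 *	int arg = MEMRANGE_SET_UPDATE;
 *
 *	memset(&mrd, 0, sizeof(mrd));
 *	mrd.mr_base = 0xd0000000;
 *	mrd.mr_len = 0x1000000;
 *	mrd.mr_flags = MDF_WRITECOMBINE;
 *	strlcpy(mrd.mr_owner, "drm", sizeof(mrd.mr_owner));
 *	error = mrset(&mem_range_softc, &mrd, &arg);
 */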
int
mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc	*targ;
	int			 error = 0;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/* make sure that what's being asked for is possible */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    mtrrtype(mrd->mr_flags) == -1)
			return(EINVAL);

		/* are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR_FIXMTRR) &&
		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
			if ((error = mrsetlow(sc, mrd, arg)) != 0)
				return(error);
		} else {
			/* it's time to play with variable MTRRs */
			if ((error = mrsetvariable(sc, mrd, arg)) != 0)
				return(error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return(ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return(EPERM);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return(EOPNOTSUPP);
	}

	/* update the hardware */
	mrstore(sc);
	return(0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * fetch the initial settings.
 */
void
mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc	*mrd;
	uint32_t		 regs[4];
	int			 nmdesc = 0;
	int			 i;

	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled */
	if (!(mtrrdef & MTRRdefType_ENABLE)) {
		printf("mtrr: CPU supports MTRRs but not enabled by BIOS\n");
		return;
	}
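	/*
	 * MTRRcap's low byte gives the variable-range count; bit 8
	 * (MTRRcap_FIXED, tested below) reports fixed-range support.
	 */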
	nmdesc = mtrrcap & 0xff;
	printf("mtrr: Pentium Pro MTRR support, %d var ranges", nmdesc);

	/* If fixed MTRRs supported and enabled */
	if ((mtrrcap & MTRRcap_FIXED) &&
	    (mtrrdef & MTRRdefType_FIXED_ENABLE)) {
		sc->mr_cap = MR_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
		printf(", %d fixed ranges", MTRR_N64K + MTRR_N16K + MTRR_N4K);
	}

	printf("\n");

	sc->mr_desc = mallocarray(nmdesc, sizeof(struct mem_range_desc),
	     M_MEMDESC, M_WAITOK|M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length */
	if (sc->mr_cap & MR_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
		}

		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
		}

		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
		}
	}

	/*
	 * Fetch the maximum physical address size supported by the
	 * processor, as reported by CPUID leaf function 0x80000008.
	 * If CPUID does not support leaf function 0x80000008, use the
	 * default 36-bit address size.
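	 *
	 * e.g. (illustrative): a CPU reporting 40 physical address bits
	 * yields mtrrmask == ((1ULL << 40) - 1) & ~0xfffULL ==
	 * 0x000000fffffff000.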
	 */
	CPUID(0x80000000, regs[0], regs[1], regs[2], regs[3]);
	if (regs[0] >= 0x80000008) {
		CPUID(0x80000008, regs[0], regs[1], regs[2], regs[3]);
		if (regs[0] & 0xff) {
			mtrrmask = (1ULL << (regs[0] & 0xff)) - 1;
			mtrrmask &= ~0x0000000000000fffULL;
		}
	}

	/*
	 * Get the current settings; anything set now is considered to
	 * have been set by the firmware.
	 */
	mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}
}

/*
 * Initialise MTRRs on a cpu from the software state.
 */
void
mrinit_cpu(struct mem_range_softc *sc)
{
	mrstoreone(sc); /* set MTRRs to match BSP */
}

void
mrreload_cpu(struct mem_range_softc *sc)
{
	u_long s;

	s = intr_disable();
	mrstoreone(sc); /* set MTRRs to match BSP */
	intr_restore(s);
}
627