1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24 /*
25 * Copyright (c) 2010, Intel Corporation.
26 * All rights reserved.
27 */
28
29
30 /*
31 * VM - Hardware Address Translation management for i386 and amd64
32 *
33 * Implementation of the interfaces described in <common/vm/hat.h>
34 *
35 * Nearly all the details of how the hardware is managed should not be
36 * visible outside this layer except for misc. machine specific functions
37 * that work in conjunction with this code.
38 *
39 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
40 */
41
42 #include <sys/machparam.h>
43 #include <sys/machsystm.h>
44 #include <sys/mman.h>
45 #include <sys/types.h>
46 #include <sys/systm.h>
47 #include <sys/cpuvar.h>
48 #include <sys/thread.h>
49 #include <sys/proc.h>
50 #include <sys/cpu.h>
51 #include <sys/kmem.h>
52 #include <sys/disp.h>
53 #include <sys/shm.h>
54 #include <sys/sysmacros.h>
56 #include <sys/vmem.h>
57 #include <sys/vmsystm.h>
58 #include <sys/promif.h>
59 #include <sys/var.h>
60 #include <sys/x86_archext.h>
61 #include <sys/atomic.h>
62 #include <sys/bitmap.h>
63 #include <sys/controlregs.h>
64 #include <sys/bootconf.h>
65 #include <sys/bootsvcs.h>
66 #include <sys/bootinfo.h>
67 #include <sys/archsystm.h>
68
69 #include <vm/seg_kmem.h>
70 #include <vm/hat_i86.h>
71 #include <vm/as.h>
72 #include <vm/seg.h>
73 #include <vm/page.h>
74 #include <vm/seg_kp.h>
75 #include <vm/seg_kpm.h>
76 #include <vm/vm_dep.h>
77 #ifdef __xpv
78 #include <sys/hypervisor.h>
79 #endif
80 #include <vm/kboot_mmu.h>
81 #include <vm/seg_spt.h>
82
83 #include <sys/cmn_err.h>
84
85 /*
86 * Basic parameters for hat operation.
87 */
88 struct hat_mmu_info mmu;
89
90 /*
91 * The page that is the kernel's top level pagetable.
92 *
93 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
94 * on this 4K page for its top level page table. The remaining groups of
95 * 4 entries are used for per processor copies of user VLP pagetables for
96 * running threads. See hat_switch() and reload_pae32() for details.
97 *
98 * vlp_page[0..3] - level==2 PTEs for kernel HAT
99 * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
100  * vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
101 * etc...
102 */
103 static x86pte_t *vlp_page;
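/*
 * For example, the 4 entry slice for the user thread running on CPU n starts
 * at &vlp_page[(n + 1) * 4]; reload_pae32() below fills it from hat_vlp_ptes.
 */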
104
105 /*
106 * forward declaration of internal utility routines
107 */
108 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
109 x86pte_t new);
110
111 /*
112 * The kernel address space exists in all HATs. To implement this the
113 * kernel reserves a fixed number of entries in the topmost level(s) of page
114  * tables. The values are set up during startup and then copied to every user
115 * hat created by hat_alloc(). This means that kernelbase must be:
116 *
117 * 4Meg aligned for 32 bit kernels
118 * 512Gig aligned for x86_64 64 bit kernel
119 *
120 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
121 * to each user hat.
122 */
123 typedef struct hat_kernel_range {
124 level_t hkr_level;
125 uintptr_t hkr_start_va;
126 uintptr_t hkr_end_va; /* zero means to end of memory */
127 } hat_kernel_range_t;
128 #define NUM_KERNEL_RANGE 2
129 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
130 static int num_kernel_ranges;
131
132 uint_t use_boot_reserve = 1; /* cleared after early boot process */
133 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */
134
135 /*
136 * enable_1gpg: controls 1g page support for user applications.
137 * By default, 1g pages are exported to user applications. enable_1gpg can
138 * be set to 0 to not export.
139 */
140 int enable_1gpg = 1;
141
142 /*
143  * AMD shanghai processors provide better management of 1gb ptes in their TLBs.
144 * By default, 1g page support will be disabled for pre-shanghai AMD
145 * processors that don't have optimal tlb support for the 1g page size.
146 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
147 * processors.
148 */
149 int chk_optimal_1gtlb = 1;
150
151
152 #ifdef DEBUG
153 uint_t map1gcnt;
154 #endif
155
156
157 /*
158 * A cpuset for all cpus. This is used for kernel address cross calls, since
159 * the kernel addresses apply to all cpus.
160 */
161 cpuset_t khat_cpuset;
162
163 /*
164 * management stuff for hat structures
165 */
166 kmutex_t hat_list_lock;
167 kcondvar_t hat_list_cv;
168 kmem_cache_t *hat_cache;
169 kmem_cache_t *hat_hash_cache;
170 kmem_cache_t *vlp_hash_cache;
171
172 /*
173 * Simple statistics
174 */
175 struct hatstats hatstat;
176
177 /*
178 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
179 * correctly. For such hypervisors we must set PT_USER for kernel
180 * entries ourselves (normally the emulation would set PT_USER for
181 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
182 * thus set appropriately. Note that dboot/kbm is OK, as only the full
183 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
184 * incorrect.
185 */
186 int pt_kern;
187
188 /*
189 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
190 */
191 extern void atomic_orb(uchar_t *addr, uchar_t val);
192 extern void atomic_andb(uchar_t *addr, uchar_t val);
193
194 #ifndef __xpv
195 extern pfn_t memseg_get_start(struct memseg *);
196 #endif
197
198 #define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask)
199 #define PP_ISMOD(pp) PP_GETRM(pp, P_MOD)
200 #define PP_ISREF(pp) PP_GETRM(pp, P_REF)
201 #define PP_ISRO(pp) PP_GETRM(pp, P_RO)
202
203 #define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm)
204 #define PP_SETMOD(pp) PP_SETRM(pp, P_MOD)
205 #define PP_SETREF(pp) PP_SETRM(pp, P_REF)
206 #define PP_SETRO(pp) PP_SETRM(pp, P_RO)
207
208 #define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm))
209 #define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD)
210 #define PP_CLRREF(pp) PP_CLRRM(pp, P_REF)
211 #define PP_CLRRO(pp) PP_CLRRM(pp, P_RO)
212 #define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO)
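/*
 * For example, PP_SETREF(pp) atomically ORs P_REF into pp->p_nrm and
 * PP_CLRALL(pp) atomically clears all three bits; no lock is required.
 */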
213
214 /*
215 * kmem cache constructor for struct hat
216 */
217 /*ARGSUSED*/
218 static int
219 hati_constructor(void *buf, void *handle, int kmflags)
220 {
221 hat_t *hat = buf;
222
223 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
224 bzero(hat->hat_pages_mapped,
225 sizeof (pgcnt_t) * (mmu.max_page_level + 1));
226 hat->hat_ism_pgcnt = 0;
227 hat->hat_stats = 0;
228 hat->hat_flags = 0;
229 CPUSET_ZERO(hat->hat_cpus);
230 hat->hat_htable = NULL;
231 hat->hat_ht_hash = NULL;
232 return (0);
233 }
234
235 /*
236 * Allocate a hat structure for as. We also create the top level
237 * htable and initialize it to contain the kernel hat entries.
238 */
239 hat_t *
240 hat_alloc(struct as *as)
241 {
242 hat_t *hat;
243 htable_t *ht; /* top level htable */
244 uint_t use_vlp;
245 uint_t r;
246 hat_kernel_range_t *rp;
247 uintptr_t va;
248 uintptr_t eva;
249 uint_t start;
250 uint_t cnt;
251 htable_t *src;
252
253 /*
254 * Once we start creating user process HATs we can enable
255 * the htable_steal() code.
256 */
257 if (can_steal_post_boot == 0)
258 can_steal_post_boot = 1;
259
260 ASSERT(AS_WRITE_HELD(as, &as->a_lock));
261 hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
262 hat->hat_as = as;
263 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
264 ASSERT(hat->hat_flags == 0);
265
266 #if defined(__xpv)
267 /*
268 * No VLP stuff on the hypervisor due to the 64-bit split top level
269 * page tables. On 32-bit it's not needed as the hypervisor takes
270 * care of copying the top level PTEs to a below 4Gig page.
271 */
272 use_vlp = 0;
273 #else /* __xpv */
274 	/* 32 bit processes use a VLP style hat when running with PAE */
275 #if defined(__amd64)
276 use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
277 #elif defined(__i386)
278 use_vlp = mmu.pae_hat;
279 #endif
280 #endif /* __xpv */
281 if (use_vlp) {
282 hat->hat_flags = HAT_VLP;
283 bzero(hat->hat_vlp_ptes, VLP_SIZE);
284 }
285
286 /*
287 * Allocate the htable hash
288 */
289 if ((hat->hat_flags & HAT_VLP)) {
290 hat->hat_num_hash = mmu.vlp_hash_cnt;
291 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
292 } else {
293 hat->hat_num_hash = mmu.hash_cnt;
294 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
295 }
296 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
297
298 /*
299 * Initialize Kernel HAT entries at the top of the top level page
300 * tables for the new hat.
301 */
302 hat->hat_htable = NULL;
303 hat->hat_ht_cached = NULL;
304 XPV_DISALLOW_MIGRATE();
305 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
306 hat->hat_htable = ht;
307
308 #if defined(__amd64)
309 if (hat->hat_flags & HAT_VLP)
310 goto init_done;
311 #endif
312
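	/*
	 * Copy the kernel's shared top level entries, as recorded in
	 * kernel_ranges[] by hat_init_finish(), into the new hat.
	 */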
313 for (r = 0; r < num_kernel_ranges; ++r) {
314 rp = &kernel_ranges[r];
315 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
316 va += cnt * LEVEL_SIZE(rp->hkr_level)) {
317
318 if (rp->hkr_level == TOP_LEVEL(hat))
319 ht = hat->hat_htable;
320 else
321 ht = htable_create(hat, va, rp->hkr_level,
322 NULL);
323
324 start = htable_va2entry(va, ht);
325 cnt = HTABLE_NUM_PTES(ht) - start;
326 eva = va +
327 ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
328 if (rp->hkr_end_va != 0 &&
329 (eva > rp->hkr_end_va || eva == 0))
330 cnt = htable_va2entry(rp->hkr_end_va, ht) -
331 start;
332
333 #if defined(__i386) && !defined(__xpv)
334 if (ht->ht_flags & HTABLE_VLP) {
335 bcopy(&vlp_page[start],
336 &hat->hat_vlp_ptes[start],
337 cnt * sizeof (x86pte_t));
338 continue;
339 }
340 #endif
341 src = htable_lookup(kas.a_hat, va, rp->hkr_level);
342 ASSERT(src != NULL);
343 x86pte_copy(src, ht, start, cnt);
344 htable_release(src);
345 }
346 }
347
348 init_done:
349
350 #if defined(__xpv)
351 /*
352 * Pin top level page tables after initializing them
353 */
354 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
355 #if defined(__amd64)
356 xen_pin(hat->hat_user_ptable, mmu.max_level);
357 #endif
358 #endif
359 XPV_ALLOW_MIGRATE();
360
361 /*
362 * Put it at the start of the global list of all hats (used by stealing)
363 *
364 * kas.a_hat is not in the list but is instead used to find the
365 * first and last items in the list.
366 *
367 * - kas.a_hat->hat_next points to the start of the user hats.
368 * The list ends where hat->hat_next == NULL
369 *
370 * - kas.a_hat->hat_prev points to the last of the user hats.
371 * The list begins where hat->hat_prev == NULL
372 */
373 mutex_enter(&hat_list_lock);
374 hat->hat_prev = NULL;
375 hat->hat_next = kas.a_hat->hat_next;
376 if (hat->hat_next)
377 hat->hat_next->hat_prev = hat;
378 else
379 kas.a_hat->hat_prev = hat;
380 kas.a_hat->hat_next = hat;
381 mutex_exit(&hat_list_lock);
382
383 return (hat);
384 }
385
386 /*
387  * The process has finished executing but the as has not been cleaned up yet.
388 */
389 /*ARGSUSED*/
390 void
391 hat_free_start(hat_t *hat)
392 {
393 ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
394
395 /*
396 * If the hat is currently a stealing victim, wait for the stealing
397 * to finish. Once we mark it as HAT_FREEING, htable_steal()
398 * won't look at its pagetables anymore.
399 */
400 mutex_enter(&hat_list_lock);
401 while (hat->hat_flags & HAT_VICTIM)
402 cv_wait(&hat_list_cv, &hat_list_lock);
403 hat->hat_flags |= HAT_FREEING;
404 mutex_exit(&hat_list_lock);
405 }
406
407 /*
408 * An address space is being destroyed, so we destroy the associated hat.
409 */
410 void
411 hat_free_end(hat_t *hat)
412 {
413 kmem_cache_t *cache;
414
415 ASSERT(hat->hat_flags & HAT_FREEING);
416
417 /*
418 * must not be running on the given hat
419 */
420 ASSERT(CPU->cpu_current_hat != hat);
421
422 /*
423 * Remove it from the list of HATs
424 */
425 mutex_enter(&hat_list_lock);
426 if (hat->hat_prev)
427 hat->hat_prev->hat_next = hat->hat_next;
428 else
429 kas.a_hat->hat_next = hat->hat_next;
430 if (hat->hat_next)
431 hat->hat_next->hat_prev = hat->hat_prev;
432 else
433 kas.a_hat->hat_prev = hat->hat_prev;
434 mutex_exit(&hat_list_lock);
435 hat->hat_next = hat->hat_prev = NULL;
436
437 #if defined(__xpv)
438 /*
439 * On the hypervisor, unpin top level page table(s)
440 */
441 xen_unpin(hat->hat_htable->ht_pfn);
442 #if defined(__amd64)
443 xen_unpin(hat->hat_user_ptable);
444 #endif
445 #endif
446
447 /*
448 * Make a pass through the htables freeing them all up.
449 */
450 htable_purge_hat(hat);
451
452 /*
453 * Decide which kmem cache the hash table came from, then free it.
454 */
455 if (hat->hat_flags & HAT_VLP)
456 cache = vlp_hash_cache;
457 else
458 cache = hat_hash_cache;
459 kmem_cache_free(cache, hat->hat_ht_hash);
460 hat->hat_ht_hash = NULL;
461
462 hat->hat_flags = 0;
463 kmem_cache_free(hat_cache, hat);
464 }
465
466 /*
467 * round kernelbase down to a supported value to use for _userlimit
468 *
469 * userlimit must be aligned down to an entry in the top level htable.
470 * The one exception is for 32 bit HAT's running PAE.
471 */
472 uintptr_t
473 hat_kernelbase(uintptr_t va)
474 {
475 #if defined(__i386)
476 va &= LEVEL_MASK(1);
477 #endif
478 if (IN_VA_HOLE(va))
479 panic("_userlimit %p will fall in VA hole\n", (void *)va);
480 return (va);
481 }
482
483 /*
484  * Determine the largest pagetable level (page size) supported by the boot loader and processor.
485 */
486 static void
487 set_max_page_level()
488 {
489 level_t lvl;
490
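	/* level 0 is 4K pages, level 1 is 2M (4M without PAE), level 2 is 1G */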
491 if (!kbm_largepage_support) {
492 lvl = 0;
493 } else {
494 if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
495 lvl = 2;
496 if (chk_optimal_1gtlb &&
497 cpuid_opteron_erratum(CPU, 6671130)) {
498 lvl = 1;
499 }
500 if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
501 LEVEL_SHIFT(0))) {
502 lvl = 1;
503 }
504 } else {
505 lvl = 1;
506 }
507 }
508 mmu.max_page_level = lvl;
509
510 if ((lvl == 2) && (enable_1gpg == 0))
511 mmu.umax_page_level = 1;
512 else
513 mmu.umax_page_level = lvl;
514 }
515
516 /*
517 * Initialize hat data structures based on processor MMU information.
518 */
519 void
520 mmu_init(void)
521 {
522 uint_t max_htables;
523 uint_t pa_bits;
524 uint_t va_bits;
525 int i;
526
527 /*
528 * If CPU enabled the page table global bit, use it for the kernel
529 * This is bit 7 in CR4 (PGE - Page Global Enable).
530 */
531 if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
532 (getcr4() & CR4_PGE) != 0)
533 mmu.pt_global = PT_GLOBAL;
534
535 /*
536 * Detect NX and PAE usage.
537 */
538 mmu.pae_hat = kbm_pae_support;
539 if (kbm_nx_support)
540 mmu.pt_nx = PT_NX;
541 else
542 mmu.pt_nx = 0;
543
544 /*
545 * Use CPU info to set various MMU parameters
546 */
547 cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
548
549 if (va_bits < sizeof (void *) * NBBY) {
550 mmu.hole_start = (1ul << (va_bits - 1));
551 mmu.hole_end = 0ul - mmu.hole_start - 1;
552 } else {
553 mmu.hole_end = 0;
554 mmu.hole_start = mmu.hole_end - 1;
555 }
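	/*
	 * For example, with 48 bit VAs the non-canonical hole spans
	 * [0x0000800000000000, 0xffff7fffffffffff].
	 */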
556 #if defined(OPTERON_ERRATUM_121)
557 /*
558 * If erratum 121 has already been detected at this time, hole_start
559 * contains the value to be subtracted from mmu.hole_start.
560 */
561 ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
562 hole_start = mmu.hole_start - hole_start;
563 #else
564 hole_start = mmu.hole_start;
565 #endif
566 hole_end = mmu.hole_end;
567
568 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
569 if (mmu.pae_hat == 0 && pa_bits > 32)
570 mmu.highest_pfn = PFN_4G - 1;
571
572 if (mmu.pae_hat) {
573 mmu.pte_size = 8; /* 8 byte PTEs */
574 mmu.pte_size_shift = 3;
575 } else {
576 mmu.pte_size = 4; /* 4 byte PTEs */
577 mmu.pte_size_shift = 2;
578 }
579
580 if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
581 panic("Processor does not support PAE");
582
583 if (!is_x86_feature(x86_featureset, X86FSET_CX8))
584 panic("Processor does not support cmpxchg8b instruction");
585
586 #if defined(__amd64)
587
588 mmu.num_level = 4;
589 mmu.max_level = 3;
590 mmu.ptes_per_table = 512;
591 mmu.top_level_count = 512;
592
593 mmu.level_shift[0] = 12;
594 mmu.level_shift[1] = 21;
595 mmu.level_shift[2] = 30;
596 mmu.level_shift[3] = 39;
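	/* i.e. pagetable levels covering 4K, 2M, 1G and 512G of VA respectively */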
597
598 #elif defined(__i386)
599
600 if (mmu.pae_hat) {
601 mmu.num_level = 3;
602 mmu.max_level = 2;
603 mmu.ptes_per_table = 512;
604 mmu.top_level_count = 4;
605
606 mmu.level_shift[0] = 12;
607 mmu.level_shift[1] = 21;
608 mmu.level_shift[2] = 30;
609
610 } else {
611 mmu.num_level = 2;
612 mmu.max_level = 1;
613 mmu.ptes_per_table = 1024;
614 mmu.top_level_count = 1024;
615
616 mmu.level_shift[0] = 12;
617 mmu.level_shift[1] = 22;
618 }
619
620 #endif /* __i386 */
621
622 for (i = 0; i < mmu.num_level; ++i) {
623 mmu.level_size[i] = 1UL << mmu.level_shift[i];
624 mmu.level_offset[i] = mmu.level_size[i] - 1;
625 mmu.level_mask[i] = ~mmu.level_offset[i];
626 }
627
628 set_max_page_level();
629
630 mmu_page_sizes = mmu.max_page_level + 1;
631 mmu_exported_page_sizes = mmu.umax_page_level + 1;
632
633 /* restrict legacy applications from using pagesizes 1g and above */
634 mmu_legacy_page_sizes =
635 (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
636
637
638 for (i = 0; i <= mmu.max_page_level; ++i) {
639 mmu.pte_bits[i] = PT_VALID | pt_kern;
640 if (i > 0)
641 mmu.pte_bits[i] |= PT_PAGESIZE;
642 }
643
644 /*
645 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
646 */
647 for (i = 1; i < mmu.num_level; ++i)
648 mmu.ptp_bits[i] = PT_PTPBITS;
649
650 #if defined(__i386)
651 mmu.ptp_bits[2] = PT_VALID;
652 #endif
653
654 /*
655 * Compute how many hash table entries to have per process for htables.
656 * We start with 1 page's worth of entries.
657 *
658  * If physical memory is small, reduce the amount needed to cover it.
659 */
660 max_htables = physmax / mmu.ptes_per_table;
661 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
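	/* one 4K page holds 512 buckets with 8 byte pointers, 1024 with 4 byte */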
662 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
663 mmu.hash_cnt >>= 1;
664 mmu.vlp_hash_cnt = mmu.hash_cnt;
665
666 #if defined(__amd64)
667 /*
668 * If running in 64 bits and physical memory is large,
669 * increase the size of the cache to cover all of memory for
670 * a 64 bit process.
671 */
672 #define HASH_MAX_LENGTH 4
673 while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
674 mmu.hash_cnt <<= 1;
675 #endif
676 }
677
678
679 /*
680 * initialize hat data structures
681 */
682 void
683 hat_init()
684 {
685 #if defined(__i386)
686 /*
687 * _userlimit must be aligned correctly
688 */
689 if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
690 prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
691 (void *)_userlimit, (void *)LEVEL_SIZE(1));
692 halt("hat_init(): Unable to continue");
693 }
694 #endif
695
696 cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
697
698 /*
699 * initialize kmem caches
700 */
701 htable_init();
702 hment_init();
703
704 hat_cache = kmem_cache_create("hat_t",
705 sizeof (hat_t), 0, hati_constructor, NULL, NULL,
706 NULL, 0, 0);
707
708 hat_hash_cache = kmem_cache_create("HatHash",
709 mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
710 NULL, 0, 0);
711
712 /*
713  * VLP hats can use a smaller hash table size on large memory machines
714 */
715 if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
716 vlp_hash_cache = hat_hash_cache;
717 } else {
718 vlp_hash_cache = kmem_cache_create("HatVlpHash",
719 mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
720 NULL, 0, 0);
721 }
722
723 /*
724 * Set up the kernel's hat
725 */
726 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
727 kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
728 mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
729 kas.a_hat->hat_as = &kas;
730 kas.a_hat->hat_flags = 0;
731 AS_LOCK_EXIT(&kas, &kas.a_lock);
732
733 CPUSET_ZERO(khat_cpuset);
734 CPUSET_ADD(khat_cpuset, CPU->cpu_id);
735
736 /*
737  * The kernel hat's next pointer serves as the head of the hat list.
738 * The kernel hat's prev pointer tracks the last hat on the list for
739 * htable_steal() to use.
740 */
741 kas.a_hat->hat_next = NULL;
742 kas.a_hat->hat_prev = NULL;
743
744 /*
745 * Allocate an htable hash bucket for the kernel
746 * XX64 - tune for 64 bit procs
747 */
748 kas.a_hat->hat_num_hash = mmu.hash_cnt;
749 kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
750 bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
751
752 /*
753 * zero out the top level and cached htable pointers
754 */
755 kas.a_hat->hat_ht_cached = NULL;
756 kas.a_hat->hat_htable = NULL;
757
758 /*
759 * Pre-allocate hrm_hashtab before enabling the collection of
760 * refmod statistics. Allocating on the fly would mean us
761 * running the risk of suffering recursive mutex enters or
762 * deadlocks.
763 */
764 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
765 KM_SLEEP);
766 }
767
768 /*
769 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
770 *
771 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
772 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
773 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
774 */
775 /*ARGSUSED*/
776 static void
777 hat_vlp_setup(struct cpu *cpu)
778 {
779 #if defined(__amd64) && !defined(__xpv)
780 struct hat_cpu_info *hci = cpu->cpu_hat_info;
781 pfn_t pfn;
782
783 /*
784 * allocate the level==2 page table for the bottom most
785 * 512Gig of address space (this is where 32 bit apps live)
786 */
787 ASSERT(hci != NULL);
788 hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
789
790 /*
791 * Allocate a top level pagetable and copy the kernel's
792 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
793 */
794 hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
795 hci->hci_vlp_pfn =
796 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
797 ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
798 bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
799
800 pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
801 ASSERT(pfn != PFN_INVALID);
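	/* entry 0 of the top level table covers VA 0..512G, where 32 bit apps run */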
802 hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
803 #endif /* __amd64 && !__xpv */
804 }
805
806 /*ARGSUSED*/
807 static void
808 hat_vlp_teardown(cpu_t *cpu)
809 {
810 #if defined(__amd64) && !defined(__xpv)
811 struct hat_cpu_info *hci;
812
813 if ((hci = cpu->cpu_hat_info) == NULL)
814 return;
815 if (hci->hci_vlp_l2ptes)
816 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
817 if (hci->hci_vlp_l3ptes)
818 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
819 #endif
820 }
821
822 #define NEXT_HKR(r, l, s, e) { \
823 kernel_ranges[r].hkr_level = l; \
824 kernel_ranges[r].hkr_start_va = s; \
825 kernel_ranges[r].hkr_end_va = e; \
826 ++r; \
827 }
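/*
 * For example, NEXT_HKR(r, 3, kernelbase, 0) below records that every level 3
 * kernel entry from kernelbase to the end of memory gets copied into each
 * user hat.
 */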
828
829 /*
830 * Finish filling in the kernel hat.
831  * Pre-fill all top level kernel page table entries for the kernel's
832  * part of the address range. From this point on we can't use any new
833  * kernel large pages if they need PTEs at max_level.
834  *
835  * Also create the kmap mappings.
836 */
837 void
838 hat_init_finish(void)
839 {
840 size_t size;
841 uint_t r = 0;
842 uintptr_t va;
843 hat_kernel_range_t *rp;
844
845
846 /*
847 * We are now effectively running on the kernel hat.
848 * Clearing use_boot_reserve shuts off using the pre-allocated boot
849 * reserve for all HAT allocations. From here on, the reserves are
850 * only used when avoiding recursion in kmem_alloc().
851 */
852 use_boot_reserve = 0;
853 htable_adjust_reserve();
854
855 /*
856 * User HATs are initialized with copies of all kernel mappings in
857 * higher level page tables. Ensure that those entries exist.
858 */
859 #if defined(__amd64)
860
861 NEXT_HKR(r, 3, kernelbase, 0);
862 #if defined(__xpv)
863 NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
864 #endif
865
866 #elif defined(__i386)
867
868 #if !defined(__xpv)
869 if (mmu.pae_hat) {
870 va = kernelbase;
871 if ((va & LEVEL_MASK(2)) != va) {
872 va = P2ROUNDUP(va, LEVEL_SIZE(2));
873 NEXT_HKR(r, 1, kernelbase, va);
874 }
875 if (va != 0)
876 NEXT_HKR(r, 2, va, 0);
877 } else
878 #endif /* __xpv */
879 NEXT_HKR(r, 1, kernelbase, 0);
880
881 #endif /* __i386 */
882
883 num_kernel_ranges = r;
884
885 /*
886 * Create all the kernel pagetables that will have entries
887 * shared to user HATs.
888 */
889 for (r = 0; r < num_kernel_ranges; ++r) {
890 rp = &kernel_ranges[r];
891 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
892 va += LEVEL_SIZE(rp->hkr_level)) {
893 htable_t *ht;
894
895 if (IN_HYPERVISOR_VA(va))
896 continue;
897
898 /* can/must skip if a page mapping already exists */
899 if (rp->hkr_level <= mmu.max_page_level &&
900 (ht = htable_getpage(kas.a_hat, va, NULL)) !=
901 NULL) {
902 htable_release(ht);
903 continue;
904 }
905
906 (void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
907 NULL);
908 }
909 }
910
911 /*
912 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
913 * page holding the top level pagetable. We use the remainder for
914 * the "per CPU" page tables for VLP processes.
915 * Map the top level kernel pagetable into the kernel to make
916  * it easy to use bcopy to access these tables.
917 */
918 if (mmu.pae_hat) {
919 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
920 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
921 kas.a_hat->hat_htable->ht_pfn,
922 #if !defined(__xpv)
923 PROT_WRITE |
924 #endif
925 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
926 HAT_LOAD | HAT_LOAD_NOCONSIST);
927 }
928 hat_vlp_setup(CPU);
929
930 /*
931 * Create kmap (cached mappings of kernel PTEs)
932 * for 32 bit we map from segmap_start .. ekernelheap
933 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
934 */
935 #if defined(__i386)
936 size = (uintptr_t)ekernelheap - segmap_start;
937 #elif defined(__amd64)
938 size = segmapsize;
939 #endif
940 hat_kmap_init((uintptr_t)segmap_start, size);
941 }
942
943 /*
944 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
945 * are 32 bit, so for safety we must use cas64() to install these.
946 */
947 #ifdef __i386
948 static void
949 reload_pae32(hat_t *hat, cpu_t *cpu)
950 {
951 x86pte_t *src;
952 x86pte_t *dest;
953 x86pte_t pte;
954 int i;
955
956 /*
957 * Load the 4 entries of the level 2 page table into this
958 * cpu's range of the vlp_page and point cr3 at them.
959 */
960 ASSERT(mmu.pae_hat);
961 src = hat->hat_vlp_ptes;
962 dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
963 for (i = 0; i < VLP_NUM_PTES; ++i) {
964 for (;;) {
965 pte = dest[i];
966 if (pte == src[i])
967 break;
968 if (cas64(dest + i, pte, src[i]) != src[i])
969 break;
970 }
971 }
972 }
973 #endif
974
975 /*
976 * Switch to a new active hat, maintaining bit masks to track active CPUs.
977 *
978 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
979 * remains a 32-bit value.
980 */
981 void
982 hat_switch(hat_t *hat)
983 {
984 uint64_t newcr3;
985 cpu_t *cpu = CPU;
986 hat_t *old = cpu->cpu_current_hat;
987
988 /*
989 * set up this information first, so we don't miss any cross calls
990 */
991 if (old != NULL) {
992 if (old == hat)
993 return;
994 if (old != kas.a_hat)
995 CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
996 }
997
998 /*
999 * Add this CPU to the active set for this HAT.
1000 */
1001 if (hat != kas.a_hat) {
1002 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1003 }
1004 cpu->cpu_current_hat = hat;
1005
1006 /*
1007 * now go ahead and load cr3
1008 */
1009 if (hat->hat_flags & HAT_VLP) {
1010 #if defined(__amd64)
1011 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1012
1013 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1014 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1015 #elif defined(__i386)
1016 reload_pae32(hat, cpu);
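		/*
		 * cr3 points at this CPU's 4 entry slice of vlp_page; slice 0
		 * belongs to the kernel hat, so CPU n uses slice n + 1.
		 */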
1017 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1018 (cpu->cpu_id + 1) * VLP_SIZE;
1019 #endif
1020 } else {
1021 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1022 }
1023 #ifdef __xpv
1024 {
1025 struct mmuext_op t[2];
1026 uint_t retcnt;
1027 uint_t opcnt = 1;
1028
1029 t[0].cmd = MMUEXT_NEW_BASEPTR;
1030 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1031 #if defined(__amd64)
1032 /*
1033 * There's an interesting problem here, as to what to
1034 * actually specify when switching to the kernel hat.
1035 * For now we'll reuse the kernel hat again.
1036 */
1037 t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1038 if (hat == kas.a_hat)
1039 t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1040 else
1041 t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1042 ++opcnt;
1043 #endif /* __amd64 */
1044 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1045 panic("HYPERVISOR_mmu_update() failed");
1046 ASSERT(retcnt == opcnt);
1047
1048 }
1049 #else
1050 setcr3(newcr3);
1051 #endif
1052 ASSERT(cpu == CPU);
1053 }
1054
1055 /*
1056 * Utility to return a valid x86pte_t from protections, pfn, and level number
1057 */
1058 static x86pte_t
1059 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1060 {
1061 x86pte_t pte;
1062 uint_t cache_attr = attr & HAT_ORDER_MASK;
1063
1064 pte = MAKEPTE(pfn, level);
1065
1066 if (attr & PROT_WRITE)
1067 PTE_SET(pte, PT_WRITABLE);
1068
1069 if (attr & PROT_USER)
1070 PTE_SET(pte, PT_USER);
1071
1072 if (!(attr & PROT_EXEC))
1073 PTE_SET(pte, mmu.pt_nx);
1074
1075 /*
1076  * Set the software bits used to track ref/mod syncs and hments.
1077 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1078 */
1079 if (flags & HAT_LOAD_NOCONSIST)
1080 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1081 else if (attr & HAT_NOSYNC)
1082 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1083
1084 /*
1085 * Set the caching attributes in the PTE. The combination
1086  * of attributes is poorly defined, so we pay attention
1087 * to them in the given order.
1088 *
1089 * The test for HAT_STRICTORDER is different because it's defined
1090 * as "0" - which was a stupid thing to do, but is too late to change!
1091 */
1092 if (cache_attr == HAT_STRICTORDER) {
1093 PTE_SET(pte, PT_NOCACHE);
1094 /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1095 } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1096 /* nothing to set */;
1097 } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1098 PTE_SET(pte, PT_NOCACHE);
1099 if (is_x86_feature(x86_featureset, X86FSET_PAT))
1100 PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1101 else
1102 PTE_SET(pte, PT_WRITETHRU);
1103 } else {
1104 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1105 }
1106
1107 return (pte);
1108 }
1109
1110 /*
1111 * Duplicate address translations of the parent to the child.
1112 * This function really isn't used anymore.
1113 */
1114 /*ARGSUSED*/
1115 int
1116 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1117 {
1118 ASSERT((uintptr_t)addr < kernelbase);
1119 ASSERT(new != kas.a_hat);
1120 ASSERT(old != kas.a_hat);
1121 return (0);
1122 }
1123
1124 /*
1125 * Allocate any hat resources required for a process being swapped in.
1126 */
1127 /*ARGSUSED*/
1128 void
1129 hat_swapin(hat_t *hat)
1130 {
1131 /* do nothing - we let everything fault back in */
1132 }
1133
1134 /*
1135 * Unload all translations associated with an address space of a process
1136 * that is being swapped out.
1137 */
1138 void
1139 hat_swapout(hat_t *hat)
1140 {
1141 uintptr_t vaddr = (uintptr_t)0;
1142 uintptr_t eaddr = _userlimit;
1143 htable_t *ht = NULL;
1144 level_t l;
1145
1146 XPV_DISALLOW_MIGRATE();
1147 /*
1148 * We can't just call hat_unload(hat, 0, _userlimit...) here, because
1149 * seg_spt and shared pagetables can't be swapped out.
1150 * Take a look at segspt_shmswapout() - it's a big no-op.
1151 *
1152 * Instead we'll walk through all the address space and unload
1153 * any mappings which we are sure are not shared, not locked.
1154 */
1155 ASSERT(IS_PAGEALIGNED(vaddr));
1156 ASSERT(IS_PAGEALIGNED(eaddr));
1157 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1158 if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1159 eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1160
1161 while (vaddr < eaddr) {
1162 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1163 if (ht == NULL)
1164 break;
1165
1166 ASSERT(!IN_VA_HOLE(vaddr));
1167
1168 /*
1169 * If the page table is shared skip its entire range.
1170 */
1171 l = ht->ht_level;
1172 if (ht->ht_flags & HTABLE_SHARED_PFN) {
1173 vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1174 htable_release(ht);
1175 ht = NULL;
1176 continue;
1177 }
1178
1179 /*
1180 * If the page table has no locked entries, unload this one.
1181 */
1182 if (ht->ht_lock_cnt == 0)
1183 hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1184 HAT_UNLOAD_UNMAP);
1185
1186 /*
1187 * If we have a level 0 page table with locked entries,
1188 * skip the entire page table, otherwise skip just one entry.
1189 */
1190 if (ht->ht_lock_cnt > 0 && l == 0)
1191 vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1192 else
1193 vaddr += LEVEL_SIZE(l);
1194 }
1195 if (ht)
1196 htable_release(ht);
1197
1198 /*
1199 * We're in swapout because the system is low on memory, so
1200 * go back and flush all the htables off the cached list.
1201 */
1202 htable_purge_hat(hat);
1203 XPV_ALLOW_MIGRATE();
1204 }
1205
1206 /*
1207 * returns number of bytes that have valid mappings in hat.
1208 */
1209 size_t
1210 hat_get_mapped_size(hat_t *hat)
1211 {
1212 size_t total = 0;
1213 int l;
1214
1215 for (l = 0; l <= mmu.max_page_level; l++)
1216 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1217 total += hat->hat_ism_pgcnt;
1218
1219 return (total);
1220 }
1221
1222 /*
1223 * enable/disable collection of stats for hat.
1224 */
1225 int
1226 hat_stats_enable(hat_t *hat)
1227 {
1228 atomic_add_32(&hat->hat_stats, 1);
1229 return (1);
1230 }
1231
1232 void
1233 hat_stats_disable(hat_t *hat)
1234 {
1235 atomic_add_32(&hat->hat_stats, -1);
1236 }
1237
1238 /*
1239 * Utility to sync the ref/mod bits from a page table entry to the page_t
1240 * We must be holding the mapping list lock when this is called.
1241 */
1242 static void
1243 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1244 {
1245 uint_t rm = 0;
1246 pgcnt_t pgcnt;
1247
1248 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1249 return;
1250
1251 if (PTE_GET(pte, PT_REF))
1252 rm |= P_REF;
1253
1254 if (PTE_GET(pte, PT_MOD))
1255 rm |= P_MOD;
1256
1257 if (rm == 0)
1258 return;
1259
1260 /*
1261 * sync to all constituent pages of a large page
1262 */
1263 ASSERT(x86_hm_held(pp));
1264 pgcnt = page_get_pagecnt(level);
1265 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1266 for (; pgcnt > 0; --pgcnt) {
1267 /*
1268 * hat_page_demote() can't decrease
1269 * pszc below this mapping size
1270 * since this large mapping existed after we
1271 * took mlist lock.
1272 */
1273 ASSERT(pp->p_szc >= level);
1274 hat_page_setattr(pp, rm);
1275 ++pp;
1276 }
1277 }
1278
1279 /*
1280  * This is the set of PTE bits for PFN, permissions and caching
1281 * that are allowed to change on a HAT_LOAD_REMAP
1282 */
1283 #define PT_REMAP_BITS \
1284 (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \
1285 PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
1286
1287 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
1288 /*
1289 * Do the low-level work to get a mapping entered into a HAT's pagetables
1290 * and in the mapping list of the associated page_t.
1291 */
1292 static int
1293 hati_pte_map(
1294 htable_t *ht,
1295 uint_t entry,
1296 page_t *pp,
1297 x86pte_t pte,
1298 int flags,
1299 void *pte_ptr)
1300 {
1301 hat_t *hat = ht->ht_hat;
1302 x86pte_t old_pte;
1303 level_t l = ht->ht_level;
1304 hment_t *hm;
1305 uint_t is_consist;
1306 uint_t is_locked;
1307 int rv = 0;
1308
1309 /*
1310 * Is this a consistent (ie. need mapping list lock) mapping?
1311 */
1312 is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1313
1314 /*
1315 * Track locked mapping count in the htable. Do this first,
1316 * as we track locking even if there already is a mapping present.
1317 */
1318 is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1319 if (is_locked)
1320 HTABLE_LOCK_INC(ht);
1321
1322 /*
1323 * Acquire the page's mapping list lock and get an hment to use.
1324 * Note that hment_prepare() might return NULL.
1325 */
1326 if (is_consist) {
1327 x86_hm_enter(pp);
1328 hm = hment_prepare(ht, entry, pp);
1329 }
1330
1331 /*
1332 * Set the new pte, retrieving the old one at the same time.
1333 */
1334 old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1335
1336 /*
1337 * Did we get a large page / page table collision?
1338 */
1339 if (old_pte == LPAGE_ERROR) {
1340 if (is_locked)
1341 HTABLE_LOCK_DEC(ht);
1342 rv = -1;
1343 goto done;
1344 }
1345
1346 /*
1347 * If the mapping didn't change there is nothing more to do.
1348 */
1349 if (PTE_EQUIV(pte, old_pte))
1350 goto done;
1351
1352 /*
1353 * Install a new mapping in the page's mapping list
1354 */
1355 if (!PTE_ISVALID(old_pte)) {
1356 if (is_consist) {
1357 hment_assign(ht, entry, pp, hm);
1358 x86_hm_exit(pp);
1359 } else {
1360 ASSERT(flags & HAT_LOAD_NOCONSIST);
1361 }
1362 #if defined(__amd64)
1363 if (ht->ht_flags & HTABLE_VLP) {
1364 cpu_t *cpu = CPU;
1365 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1366 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1367 }
1368 #endif
1369 HTABLE_INC(ht->ht_valid_cnt);
1370 PGCNT_INC(hat, l);
1371 return (rv);
1372 }
1373
1374 /*
1375 * Remap's are more complicated:
1376 * - HAT_LOAD_REMAP must be specified if changing the pfn.
1377 * We also require that NOCONSIST be specified.
1378 * - Otherwise only permission or caching bits may change.
1379 */
1380 if (!PTE_ISPAGE(old_pte, l))
1381 panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1382
1383 if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1384 REMAPASSERT(flags & HAT_LOAD_REMAP);
1385 REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1386 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1387 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1388 pf_is_memory(PTE2PFN(pte, l)));
1389 REMAPASSERT(!is_consist);
1390 }
1391
1392 /*
1393  * We only let remaps change certain bits in the PTE.
1394 */
1395 if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1396 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1397 old_pte, pte);
1398
1399 /*
1400 * We don't create any mapping list entries on a remap, so release
1401 * any allocated hment after we drop the mapping list lock.
1402 */
1403 done:
1404 if (is_consist) {
1405 x86_hm_exit(pp);
1406 if (hm != NULL)
1407 hment_free(hm);
1408 }
1409 return (rv);
1410 }
1411
1412 /*
1413 * Internal routine to load a single page table entry. This only fails if
1414 * we attempt to overwrite a page table link with a large page.
1415 */
1416 static int
1417 hati_load_common(
1418 hat_t *hat,
1419 uintptr_t va,
1420 page_t *pp,
1421 uint_t attr,
1422 uint_t flags,
1423 level_t level,
1424 pfn_t pfn)
1425 {
1426 htable_t *ht;
1427 uint_t entry;
1428 x86pte_t pte;
1429 int rv = 0;
1430
1431 /*
1432 * The number 16 is arbitrary and here to catch a recursion problem
1433 * early before we blow out the kernel stack.
1434 */
1435 ++curthread->t_hatdepth;
1436 ASSERT(curthread->t_hatdepth < 16);
1437
1438 ASSERT(hat == kas.a_hat ||
1439 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1440
1441 if (flags & HAT_LOAD_SHARE)
1442 hat->hat_flags |= HAT_SHARED;
1443
1444 /*
1445 * Find the page table that maps this page if it already exists.
1446 */
1447 ht = htable_lookup(hat, va, level);
1448
1449 /*
1450 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1451 */
1452 if (pp == NULL)
1453 flags |= HAT_LOAD_NOCONSIST;
1454
1455 if (ht == NULL) {
1456 ht = htable_create(hat, va, level, NULL);
1457 ASSERT(ht != NULL);
1458 }
1459 entry = htable_va2entry(va, ht);
1460
1461 /*
1462 * a bunch of paranoid error checking
1463 */
1464 ASSERT(ht->ht_busy > 0);
1465 if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1466 panic("hati_load_common: bad htable %p, va %p",
1467 (void *)ht, (void *)va);
1468 ASSERT(ht->ht_level == level);
1469
1470 /*
1471 * construct the new PTE
1472 */
1473 if (hat == kas.a_hat)
1474 attr &= ~PROT_USER;
1475 pte = hati_mkpte(pfn, attr, level, flags);
1476 if (hat == kas.a_hat && va >= kernelbase)
1477 PTE_SET(pte, mmu.pt_global);
1478
1479 /*
1480 * establish the mapping
1481 */
1482 rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1483
1484 /*
1485 * release the htable and any reserves
1486 */
1487 htable_release(ht);
1488 --curthread->t_hatdepth;
1489 return (rv);
1490 }
1491
1492 /*
1493 * special case of hat_memload to deal with some kernel addrs for performance
1494 */
1495 static void
1496 hat_kmap_load(
1497 caddr_t addr,
1498 page_t *pp,
1499 uint_t attr,
1500 uint_t flags)
1501 {
1502 uintptr_t va = (uintptr_t)addr;
1503 x86pte_t pte;
1504 pfn_t pfn = page_pptonum(pp);
1505 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr);
1506 htable_t *ht;
1507 uint_t entry;
1508 void *pte_ptr;
1509
1510 /*
1511 * construct the requested PTE
1512 */
1513 attr &= ~PROT_USER;
1514 attr |= HAT_STORECACHING_OK;
1515 pte = hati_mkpte(pfn, attr, 0, flags);
1516 PTE_SET(pte, mmu.pt_global);
1517
1518 /*
1519 * Figure out the pte_ptr and htable and use common code to finish up
1520 */
1521 if (mmu.pae_hat)
1522 pte_ptr = mmu.kmap_ptes + pg_off;
1523 else
1524 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
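	/*
	 * Each level 0 kmap htable maps LEVEL_SIZE(1) bytes of VA, hence the
	 * offset from the first kmap htable is shifted by LEVEL_SHIFT(1).
	 */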
1525 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1526 LEVEL_SHIFT(1)];
1527 entry = htable_va2entry(va, ht);
1528 ++curthread->t_hatdepth;
1529 ASSERT(curthread->t_hatdepth < 16);
1530 (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1531 --curthread->t_hatdepth;
1532 }
1533
1534 /*
1535 * hat_memload() - load a translation to the given page struct
1536 *
1537 * Flags for hat_memload/hat_devload/hat_*attr.
1538 *
1539 * HAT_LOAD Default flags to load a translation to the page.
1540 *
1541 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(),
1542 * and hat_devload().
1543 *
1544 * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1545 * sets PT_NOCONSIST
1546 *
1547 * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables
1548  * that map some user pages (not kas) are shared by more
1549 * than one process (eg. ISM).
1550 *
1551 * HAT_LOAD_REMAP Reload a valid pte with a different page frame.
1552 *
1553 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this
1554 * point, it's setting up mapping to allocate internal
1555 * hat layer data structures. This flag forces hat layer
1556 * to tap its reserves in order to prevent infinite
1557 * recursion.
1558 *
1559 * The following is a protection attribute (like PROT_READ, etc.)
1560 *
1561 * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits
1562 * are never cleared.
1563 *
1564 * Installing new valid PTE's and creation of the mapping list
1565 * entry are controlled under the same lock. It's derived from the
1566 * page_t being mapped.
1567 */
1568 static uint_t supported_memload_flags =
1569 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1570 HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
1571
1572 void
1573 hat_memload(
1574 hat_t *hat,
1575 caddr_t addr,
1576 page_t *pp,
1577 uint_t attr,
1578 uint_t flags)
1579 {
1580 uintptr_t va = (uintptr_t)addr;
1581 level_t level = 0;
1582 pfn_t pfn = page_pptonum(pp);
1583
1584 XPV_DISALLOW_MIGRATE();
1585 ASSERT(IS_PAGEALIGNED(va));
1586 ASSERT(hat == kas.a_hat || va < _userlimit);
1587 ASSERT(hat == kas.a_hat ||
1588 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1589 ASSERT((flags & supported_memload_flags) == flags);
1590
1591 ASSERT(!IN_VA_HOLE(va));
1592 ASSERT(!PP_ISFREE(pp));
1593
1594 /*
1595 * kernel address special case for performance.
1596 */
1597 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1598 ASSERT(hat == kas.a_hat);
1599 hat_kmap_load(addr, pp, attr, flags);
1600 XPV_ALLOW_MIGRATE();
1601 return;
1602 }
1603
1604 /*
1605 * This is used for memory with normal caching enabled, so
1606 * always set HAT_STORECACHING_OK.
1607 */
1608 attr |= HAT_STORECACHING_OK;
1609 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1610 panic("unexpected hati_load_common() failure");
1611 XPV_ALLOW_MIGRATE();
1612 }
1613
1614 /* ARGSUSED */
1615 void
1616 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1617 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1618 {
1619 hat_memload(hat, addr, pp, attr, flags);
1620 }
1621
1622 /*
1623 * Load the given array of page structs using large pages when possible
1624 */
1625 void
1626 hat_memload_array(
1627 hat_t *hat,
1628 caddr_t addr,
1629 size_t len,
1630 page_t **pages,
1631 uint_t attr,
1632 uint_t flags)
1633 {
1634 uintptr_t va = (uintptr_t)addr;
1635 uintptr_t eaddr = va + len;
1636 level_t level;
1637 size_t pgsize;
1638 pgcnt_t pgindx = 0;
1639 pfn_t pfn;
1640 pgcnt_t i;
1641
1642 XPV_DISALLOW_MIGRATE();
1643 ASSERT(IS_PAGEALIGNED(va));
1644 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1645 ASSERT(hat == kas.a_hat ||
1646 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1647 ASSERT((flags & supported_memload_flags) == flags);
1648
1649 /*
1650 * memload is used for memory with full caching enabled, so
1651 * set HAT_STORECACHING_OK.
1652 */
1653 attr |= HAT_STORECACHING_OK;
1654
1655 /*
1656 * handle all pages using largest possible pagesize
1657 */
1658 while (va < eaddr) {
1659 /*
1660 * decide what level mapping to use (ie. pagesize)
1661 */
1662 pfn = page_pptonum(pages[pgindx]);
1663 for (level = mmu.max_page_level; ; --level) {
1664 pgsize = LEVEL_SIZE(level);
1665 if (level == 0)
1666 break;
1667
1668 if (!IS_P2ALIGNED(va, pgsize) ||
1669 (eaddr - va) < pgsize ||
1670 !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1671 continue;
1672
1673 /*
1674 * To use a large mapping of this size, all the
1675 * pages we are passed must be sequential subpages
1676 * of the large page.
1677 * hat_page_demote() can't change p_szc because
1678 * all pages are locked.
1679 */
1680 if (pages[pgindx]->p_szc >= level) {
1681 for (i = 0; i < mmu_btop(pgsize); ++i) {
1682 if (pfn + i !=
1683 page_pptonum(pages[pgindx + i]))
1684 break;
1685 ASSERT(pages[pgindx + i]->p_szc >=
1686 level);
1687 ASSERT(pages[pgindx] + i ==
1688 pages[pgindx + i]);
1689 }
1690 if (i == mmu_btop(pgsize)) {
1691 #ifdef DEBUG
1692 if (level == 2)
1693 map1gcnt++;
1694 #endif
1695 break;
1696 }
1697 }
1698 }
1699
1700 /*
1701 * Load this page mapping. If the load fails, try a smaller
1702 * pagesize.
1703 */
1704 ASSERT(!IN_VA_HOLE(va));
1705 while (hati_load_common(hat, va, pages[pgindx], attr,
1706 flags, level, pfn) != 0) {
1707 if (level == 0)
1708 panic("unexpected hati_load_common() failure");
1709 --level;
1710 pgsize = LEVEL_SIZE(level);
1711 }
1712
1713 /*
1714 * move to next page
1715 */
1716 va += pgsize;
1717 pgindx += mmu_btop(pgsize);
1718 }
1719 XPV_ALLOW_MIGRATE();
1720 }
1721
1722 /* ARGSUSED */
1723 void
1724 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1725 struct page **pps, uint_t attr, uint_t flags,
1726 hat_region_cookie_t rcookie)
1727 {
1728 hat_memload_array(hat, addr, len, pps, attr, flags);
1729 }
1730
1731 /*
1732 * void hat_devload(hat, addr, len, pf, attr, flags)
1733 * load/lock the given page frame number
1734 *
1735 * Advisory ordering attributes. Apply only to device mappings.
1736 *
1737 * HAT_STRICTORDER: the CPU must issue the references in order, as the
1738 * programmer specified. This is the default.
1739 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1740 * of reordering; store or load with store or load).
1741 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1742 * to consecutive locations (for example, turn two consecutive byte
1743 * stores into one halfword store), and it may batch individual loads
1744 * (for example, turn two consecutive byte loads into one halfword load).
1745 * This also implies re-ordering.
1746 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1747 * until another store occurs. The default is to fetch new data
1748 * on every load. This also implies merging.
1749 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1750 * the device (perhaps with other data) at a later time. The default is
1751 * to push the data right away. This also implies load caching.
1752 *
1753 * Equivalent of hat_memload(), but can be used for device memory where
1754 * there are no page_t's and we support additional flags (write merging, etc).
1755 * Note that we can have large page mappings with this interface.
1756 */
1757 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1758 HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1759 HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
1760
1761 void
1762 hat_devload(
1763 hat_t *hat,
1764 caddr_t addr,
1765 size_t len,
1766 pfn_t pfn,
1767 uint_t attr,
1768 int flags)
1769 {
1770 uintptr_t va = ALIGN2PAGE(addr);
1771 uintptr_t eva = va + len;
1772 level_t level;
1773 size_t pgsize;
1774 page_t *pp;
1775 int f; /* per PTE copy of flags - maybe modified */
1776 uint_t a; /* per PTE copy of attr */
1777
1778 XPV_DISALLOW_MIGRATE();
1779 ASSERT(IS_PAGEALIGNED(va));
1780 ASSERT(hat == kas.a_hat || eva <= _userlimit);
1781 ASSERT(hat == kas.a_hat ||
1782 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1783 ASSERT((flags & supported_devload_flags) == flags);
1784
1785 /*
1786 * handle all pages
1787 */
1788 while (va < eva) {
1789
1790 /*
1791 * decide what level mapping to use (ie. pagesize)
1792 */
1793 for (level = mmu.max_page_level; ; --level) {
1794 pgsize = LEVEL_SIZE(level);
1795 if (level == 0)
1796 break;
1797 if (IS_P2ALIGNED(va, pgsize) &&
1798 (eva - va) >= pgsize &&
1799 IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1800 #ifdef DEBUG
1801 if (level == 2)
1802 map1gcnt++;
1803 #endif
1804 break;
1805 }
1806 }
1807
1808 /*
1809 * If this is just memory then allow caching (this happens
1810 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1811 * to override that. If we don't have a page_t then make sure
1812 * NOCONSIST is set.
1813 */
1814 a = attr;
1815 f = flags;
1816 if (!pf_is_memory(pfn))
1817 f |= HAT_LOAD_NOCONSIST;
1818 else if (!(a & HAT_PLAT_NOCACHE))
1819 a |= HAT_STORECACHING_OK;
1820
1821 if (f & HAT_LOAD_NOCONSIST)
1822 pp = NULL;
1823 else
1824 pp = page_numtopp_nolock(pfn);
1825
1826 /*
1827 * Check to make sure we are really trying to map a valid
1828 * memory page. The caller wishing to intentionally map
1829 * free memory pages will have passed the HAT_LOAD_NOCONSIST
1830  * flag, in which case pp will be NULL.
1831 */
1832 if (pp != NULL) {
1833 if (PP_ISFREE(pp)) {
1834 panic("hat_devload: loading "
1835 "a mapping to free page %p", (void *)pp);
1836 }
1837
1838 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1839 panic("hat_devload: loading a mapping "
1840 "to an unlocked page %p",
1841 (void *)pp);
1842 }
1843 }
1844
1845 /*
1846 * load this page mapping
1847 */
1848 ASSERT(!IN_VA_HOLE(va));
1849 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1850 if (level == 0)
1851 panic("unexpected hati_load_common() failure");
1852 --level;
1853 pgsize = LEVEL_SIZE(level);
1854 }
1855
1856 /*
1857 * move to next page
1858 */
1859 va += pgsize;
1860 pfn += mmu_btop(pgsize);
1861 }
1862 XPV_ALLOW_MIGRATE();
1863 }
1864
1865 /*
1866 * void hat_unlock(hat, addr, len)
1867 * unlock the mappings to a given range of addresses
1868 *
1869 * Locks are tracked by ht_lock_cnt in the htable.
1870 */
1871 void
1872 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1873 {
1874 uintptr_t vaddr = (uintptr_t)addr;
1875 uintptr_t eaddr = vaddr + len;
1876 htable_t *ht = NULL;
1877
1878 /*
1879 * kernel entries are always locked, we don't track lock counts
1880 */
1881 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1882 ASSERT(IS_PAGEALIGNED(vaddr));
1883 ASSERT(IS_PAGEALIGNED(eaddr));
1884 if (hat == kas.a_hat)
1885 return;
1886 if (eaddr > _userlimit)
1887 panic("hat_unlock() address out of range - above _userlimit");
1888
1889 XPV_DISALLOW_MIGRATE();
1890 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1891 while (vaddr < eaddr) {
1892 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1893 if (ht == NULL)
1894 break;
1895
1896 ASSERT(!IN_VA_HOLE(vaddr));
1897
1898 if (ht->ht_lock_cnt < 1)
1899 panic("hat_unlock(): lock_cnt < 1, "
1900 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1901 HTABLE_LOCK_DEC(ht);
1902
1903 vaddr += LEVEL_SIZE(ht->ht_level);
1904 }
1905 if (ht)
1906 htable_release(ht);
1907 XPV_ALLOW_MIGRATE();
1908 }
1909
1910 /* ARGSUSED */
1911 void
1912 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1913 hat_region_cookie_t rcookie)
1914 {
1915 panic("No shared region support on x86");
1916 }
1917
1918 #if !defined(__xpv)
1919 /*
1920 * Cross call service routine to demap a virtual page on
1921 * the current CPU or flush all mappings in TLB.
1922 */
1923 /*ARGSUSED*/
1924 static int
1925 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1926 {
1927 hat_t *hat = (hat_t *)a1;
1928 caddr_t addr = (caddr_t)a2;
1929
1930 /*
1931 * If the target hat isn't the kernel and this CPU isn't operating
1932 * in the target hat, we can ignore the cross call.
1933 */
1934 if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1935 return (0);
1936
1937 /*
1938 * For a normal address, we just flush one page mapping
1939 */
1940 if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1941 mmu_tlbflush_entry(addr);
1942 return (0);
1943 }
1944
1945 /*
1946 * Otherwise we reload cr3 to effect a complete TLB flush.
1947 *
1948  * A reload of cr3 on a VLP process also means we must recopy the
1949  * pte values from the struct hat.
1950 */
1951 if (hat->hat_flags & HAT_VLP) {
1952 #if defined(__amd64)
1953 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1954
1955 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1956 #elif defined(__i386)
1957 reload_pae32(hat, CPU);
1958 #endif
1959 }
1960 reload_cr3();
1961 return (0);
1962 }
1963
1964 /*
1965 * Flush all TLB entries, including global (ie. kernel) ones.
1966 */
1967 static void
1968 flush_all_tlb_entries(void)
1969 {
1970 ulong_t cr4 = getcr4();
1971
1972 if (cr4 & CR4_PGE) {
1973 setcr4(cr4 & ~(ulong_t)CR4_PGE);
1974 setcr4(cr4);
1975
1976 /*
1977 * 32 bit PAE also needs to always reload_cr3()
1978 */
1979 if (mmu.max_level == 2)
1980 reload_cr3();
1981 } else {
1982 reload_cr3();
1983 }
1984 }
1985
1986 #define TLB_CPU_HALTED (01ul)
1987 #define TLB_INVAL_ALL (02ul)
1988 #define CAS_TLB_INFO(cpu, old, new) \
1989 caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
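/*
 * Protocol: an idle CPU ORs TLB_CPU_HALTED into its mcpu_tlb_info. Rather
 * than cross call such a CPU, hat_tlb_inval() may OR in TLB_INVAL_ALL and
 * skip it; tlb_service() then performs the deferred flush when the CPU wakes.
 */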
1990
1991 /*
1992 * Record that a CPU is going idle
1993 */
1994 void
1995 tlb_going_idle(void)
1996 {
1997 atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
1998 }
1999
2000 /*
2001 * Service a delayed TLB flush if coming out of being idle.
2002  * It is called from the cpu idle notification with interrupts disabled.
2003 */
2004 void
2005 tlb_service(void)
2006 {
2007 ulong_t tlb_info;
2008 ulong_t found;
2009
2010 /*
2011 * We only have to do something if coming out of being idle.
2012 */
2013 tlb_info = CPU->cpu_m.mcpu_tlb_info;
2014 if (tlb_info & TLB_CPU_HALTED) {
2015 ASSERT(CPU->cpu_current_hat == kas.a_hat);
2016
2017 /*
2018 * Atomic clear and fetch of old state.
2019 */
2020 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
2021 ASSERT(found & TLB_CPU_HALTED);
2022 tlb_info = found;
2023 SMT_PAUSE();
2024 }
2025 if (tlb_info & TLB_INVAL_ALL)
2026 flush_all_tlb_entries();
2027 }
2028 }
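
/*
 * Illustrative sketch of the delayed-flush handshake implemented by the
 * routines above (a schematic reading of the code, not a new interface):
 *
 *	idle CPU				hat_tlb_inval() on another CPU
 *	--------				------------------------------
 *	tlb_going_idle();			sees tlb_info == TLB_CPU_HALTED:
 *	...halted, not in any user hat...	CAS_TLB_INFO(cpu, TLB_CPU_HALTED,
 *						    TLB_CPU_HALTED | TLB_INVAL_ALL);
 *	tlb_service();				CPUSET_DEL(cpus_to_shootdown, c);
 *	  sees TLB_INVAL_ALL and calls
 *	  flush_all_tlb_entries();
 *
 * The net effect is that a halted CPU is never sent a cross call; it
 * performs the full flush itself when it resumes.
 */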
2029 #endif /* !__xpv */
2030
2031 /*
2032 * Internal routine to do cross calls to invalidate a range of pages on
2033 * all CPUs using a given hat.
2034 */
2035 void
2036 hat_tlb_inval(hat_t *hat, uintptr_t va)
2037 {
2038 extern int flushes_require_xcalls; /* from mp_startup.c */
2039 cpuset_t justme;
2040 cpuset_t cpus_to_shootdown;
2041 #ifndef __xpv
2042 cpuset_t check_cpus;
2043 cpu_t *cpup;
2044 int c;
2045 #endif
2046
2047 /*
2048 * If the hat is being destroyed, there are no more users, so
2049 * demap need not do anything.
2050 */
2051 if (hat->hat_flags & HAT_FREEING)
2052 return;
2053
2054 /*
2055 	 * If demapping from a shared pagetable, we must demap the
2056 	 * entire set of user TLBs, since we don't know what addresses
2057 	 * they were shared at.
2058 */
2059 if (hat->hat_flags & HAT_SHARED) {
2060 hat = kas.a_hat;
2061 va = DEMAP_ALL_ADDR;
2062 }
2063
2064 /*
2065 * if not running with multiple CPUs, don't use cross calls
2066 */
2067 if (panicstr || !flushes_require_xcalls) {
2068 #ifdef __xpv
2069 if (va == DEMAP_ALL_ADDR)
2070 xen_flush_tlb();
2071 else
2072 xen_flush_va((caddr_t)va);
2073 #else
2074 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
2075 #endif
2076 return;
2077 }
2078
2079
2080 /*
2081 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2082 * Otherwise it's just CPUs currently executing in this hat.
2083 */
2084 kpreempt_disable();
2085 CPUSET_ONLY(justme, CPU->cpu_id);
2086 if (hat == kas.a_hat)
2087 cpus_to_shootdown = khat_cpuset;
2088 else
2089 cpus_to_shootdown = hat->hat_cpus;
2090
2091 #ifndef __xpv
2092 /*
2093 * If any CPUs in the set are idle, just request a delayed flush
2094 * and avoid waking them up.
2095 */
2096 check_cpus = cpus_to_shootdown;
2097 for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2098 ulong_t tlb_info;
2099
2100 if (!CPU_IN_SET(check_cpus, c))
2101 continue;
2102 CPUSET_DEL(check_cpus, c);
2103 cpup = cpu[c];
2104 if (cpup == NULL)
2105 continue;
2106
2107 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2108 while (tlb_info == TLB_CPU_HALTED) {
2109 (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2110 TLB_CPU_HALTED | TLB_INVAL_ALL);
2111 SMT_PAUSE();
2112 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2113 }
2114 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2115 HATSTAT_INC(hs_tlb_inval_delayed);
2116 CPUSET_DEL(cpus_to_shootdown, c);
2117 }
2118 }
2119 #endif
2120
2121 if (CPUSET_ISNULL(cpus_to_shootdown) ||
2122 CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2123
2124 #ifdef __xpv
2125 if (va == DEMAP_ALL_ADDR)
2126 xen_flush_tlb();
2127 else
2128 xen_flush_va((caddr_t)va);
2129 #else
2130 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
2131 #endif
2132
2133 } else {
2134
2135 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2136 #ifdef __xpv
2137 if (va == DEMAP_ALL_ADDR)
2138 xen_gflush_tlb(cpus_to_shootdown);
2139 else
2140 xen_gflush_va((caddr_t)va, cpus_to_shootdown);
2141 #else
2142 xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL,
2143 CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2144 #endif
2145
2146 }
2147 kpreempt_enable();
2148 }
2149
2150 /*
2151 * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2152 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
2153 * handle releasing of the htables.
2154 */
2155 void
2156 hat_pte_unmap(
2157 htable_t *ht,
2158 uint_t entry,
2159 uint_t flags,
2160 x86pte_t old_pte,
2161 void *pte_ptr)
2162 {
2163 hat_t *hat = ht->ht_hat;
2164 hment_t *hm = NULL;
2165 page_t *pp = NULL;
2166 level_t l = ht->ht_level;
2167 pfn_t pfn;
2168
2169 /*
2170 * We always track the locking counts, even if nothing is unmapped
2171 */
2172 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2173 ASSERT(ht->ht_lock_cnt > 0);
2174 HTABLE_LOCK_DEC(ht);
2175 }
2176
2177 /*
2178 * Figure out which page's mapping list lock to acquire using the PFN
2179 	 * passed in the "old" PTE. We then attempt to invalidate the PTE.
2180 * If another thread, probably a hat_pageunload, has asynchronously
2181 * unmapped/remapped this address we'll loop here.
2182 */
2183 ASSERT(ht->ht_busy > 0);
2184 while (PTE_ISVALID(old_pte)) {
2185 pfn = PTE2PFN(old_pte, l);
2186 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2187 pp = NULL;
2188 } else {
2189 #ifdef __xpv
2190 if (pfn == PFN_INVALID)
2191 panic("Invalid PFN, but not PT_NOCONSIST");
2192 #endif
2193 pp = page_numtopp_nolock(pfn);
2194 if (pp == NULL) {
2195 panic("no page_t, not NOCONSIST: old_pte="
2196 FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2197 old_pte, (uintptr_t)ht, entry,
2198 (uintptr_t)pte_ptr);
2199 }
2200 x86_hm_enter(pp);
2201 }
2202
2203 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
2204
2205 /*
2206 * If the page hadn't changed we've unmapped it and can proceed
2207 */
2208 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2209 break;
2210
2211 /*
2212 * Otherwise, we'll have to retry with the current old_pte.
2213 * Drop the hment lock, since the pfn may have changed.
2214 */
2215 if (pp != NULL) {
2216 x86_hm_exit(pp);
2217 pp = NULL;
2218 } else {
2219 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2220 }
2221 }
2222
2223 /*
2224 * If the old mapping wasn't valid, there's nothing more to do
2225 */
2226 if (!PTE_ISVALID(old_pte)) {
2227 if (pp != NULL)
2228 x86_hm_exit(pp);
2229 return;
2230 }
2231
2232 /*
2233 * Take care of syncing any MOD/REF bits and removing the hment.
2234 */
2235 if (pp != NULL) {
2236 if (!(flags & HAT_UNLOAD_NOSYNC))
2237 hati_sync_pte_to_page(pp, old_pte, l);
2238 hm = hment_remove(pp, ht, entry);
2239 x86_hm_exit(pp);
2240 if (hm != NULL)
2241 hment_free(hm);
2242 }
2243
2244 /*
2245 	 * Handle bookkeeping in the htable and hat
2246 */
2247 ASSERT(ht->ht_valid_cnt > 0);
2248 HTABLE_DEC(ht->ht_valid_cnt);
2249 PGCNT_DEC(hat, l);
2250 }
2251
2252 /*
2253 * very cheap unload implementation to special case some kernel addresses
2254 */
2255 static void
2256 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2257 {
2258 uintptr_t va = (uintptr_t)addr;
2259 uintptr_t eva = va + len;
2260 pgcnt_t pg_index;
2261 htable_t *ht;
2262 uint_t entry;
2263 x86pte_t *pte_ptr;
2264 x86pte_t old_pte;
2265
2266 for (; va < eva; va += MMU_PAGESIZE) {
2267 /*
2268 * Get the PTE
2269 */
2270 pg_index = mmu_btop(va - mmu.kmap_addr);
2271 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2272 old_pte = GET_PTE(pte_ptr);
2273
2274 /*
2275 * get the htable / entry
2276 */
2277 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2278 >> LEVEL_SHIFT(1)];
2279 entry = htable_va2entry(va, ht);
2280
2281 /*
2282 * use mostly common code to unmap it.
2283 */
2284 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
2285 }
2286 }
2287
2288
2289 /*
2290 * unload a range of virtual address space (no callback)
2291 */
2292 void
2293 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2294 {
2295 uintptr_t va = (uintptr_t)addr;
2296
2297 XPV_DISALLOW_MIGRATE();
2298 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2299
2300 /*
2301 * special case for performance.
2302 */
2303 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2304 ASSERT(hat == kas.a_hat);
2305 hat_kmap_unload(addr, len, flags);
2306 } else {
2307 hat_unload_callback(hat, addr, len, flags, NULL);
2308 }
2309 XPV_ALLOW_MIGRATE();
2310 }
2311
2312 /*
2313 * Do the callbacks for ranges being unloaded.
2314 */
2315 typedef struct range_info {
2316 uintptr_t rng_va;
2317 ulong_t rng_cnt;
2318 level_t rng_level;
2319 } range_info_t;
2320
2321 static void
2322 handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
2323 {
2324 /*
2325 * do callbacks to upper level VM system
2326 */
2327 while (cb != NULL && cnt > 0) {
2328 --cnt;
2329 cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2330 cb->hcb_end_addr = cb->hcb_start_addr;
2331 cb->hcb_end_addr +=
2332 range[cnt].rng_cnt << LEVEL_SIZE(range[cnt].rng_level);
2333 cb->hcb_function(cb);
2334 }
2335 }
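
/*
 * Illustrative sketch (hypothetical caller): a segment driver wanting
 * per-range notification fills in a hat_callback_t before calling
 * hat_unload_callback(); handle_ranges() above then sets
 * hcb_start_addr/hcb_end_addr for each contiguous run before invoking
 * hcb_function. The hcb_data field and callback name below are
 * assumptions for illustration only:
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_data = seg_private;
 *	cb.hcb_function = my_range_unloaded;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD_UNMAP, &cb);
 */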
2336
2337 /*
2338 * Unload a given range of addresses (has optional callback)
2339 *
2340 * Flags:
2341 * define HAT_UNLOAD 0x00
2342 * define HAT_UNLOAD_NOSYNC 0x02
2343 * define HAT_UNLOAD_UNLOCK 0x04
2344 * define HAT_UNLOAD_OTHER 0x08 - not used
2345 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD
2346 */
2347 #define MAX_UNLOAD_CNT (8)
2348 void
2349 hat_unload_callback(
2350 hat_t *hat,
2351 caddr_t addr,
2352 size_t len,
2353 uint_t flags,
2354 hat_callback_t *cb)
2355 {
2356 uintptr_t vaddr = (uintptr_t)addr;
2357 uintptr_t eaddr = vaddr + len;
2358 htable_t *ht = NULL;
2359 uint_t entry;
2360 uintptr_t contig_va = (uintptr_t)-1L;
2361 range_info_t r[MAX_UNLOAD_CNT];
2362 uint_t r_cnt = 0;
2363 x86pte_t old_pte;
2364
2365 XPV_DISALLOW_MIGRATE();
2366 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2367 ASSERT(IS_PAGEALIGNED(vaddr));
2368 ASSERT(IS_PAGEALIGNED(eaddr));
2369
2370 /*
2371 * Special case a single page being unloaded for speed. This happens
2372 * quite frequently, COW faults after a fork() for example.
2373 */
2374 if (cb == NULL && len == MMU_PAGESIZE) {
2375 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2376 if (ht != NULL) {
2377 if (PTE_ISVALID(old_pte))
2378 hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2379 htable_release(ht);
2380 }
2381 XPV_ALLOW_MIGRATE();
2382 return;
2383 }
2384
2385 while (vaddr < eaddr) {
2386 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2387 if (ht == NULL)
2388 break;
2389
2390 ASSERT(!IN_VA_HOLE(vaddr));
2391
2392 if (vaddr < (uintptr_t)addr)
2393 panic("hat_unload_callback(): unmap inside large page");
2394
2395 /*
2396 * We'll do the call backs for contiguous ranges
2397 */
2398 if (vaddr != contig_va ||
2399 (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2400 if (r_cnt == MAX_UNLOAD_CNT) {
2401 handle_ranges(cb, r_cnt, r);
2402 r_cnt = 0;
2403 }
2404 r[r_cnt].rng_va = vaddr;
2405 r[r_cnt].rng_cnt = 0;
2406 r[r_cnt].rng_level = ht->ht_level;
2407 ++r_cnt;
2408 }
2409
2410 /*
2411 * Unload one mapping from the page tables.
2412 */
2413 entry = htable_va2entry(vaddr, ht);
2414 hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2415 ASSERT(ht->ht_level <= mmu.max_page_level);
2416 vaddr += LEVEL_SIZE(ht->ht_level);
2417 contig_va = vaddr;
2418 ++r[r_cnt - 1].rng_cnt;
2419 }
2420 if (ht)
2421 htable_release(ht);
2422
2423 /*
2424 * handle last range for callbacks
2425 */
2426 if (r_cnt > 0)
2427 handle_ranges(cb, r_cnt, r);
2428 XPV_ALLOW_MIGRATE();
2429 }
2430
2431 /*
2432 * Invalidate a virtual address translation on a slave CPU during
2433 * panic() dumps.
2434 */
2435 void
2436 hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2437 {
2438 ssize_t sz;
2439 caddr_t endva = va + size;
2440
2441 while (va < endva) {
2442 sz = hat_getpagesize(hat, va);
2443 if (sz < 0) {
2444 #ifdef __xpv
2445 xen_flush_tlb();
2446 #else
2447 flush_all_tlb_entries();
2448 #endif
2449 break;
2450 }
2451 #ifdef __xpv
2452 xen_flush_va(va);
2453 #else
2454 mmu_tlbflush_entry(va);
2455 #endif
2456 va += sz;
2457 }
2458 }
2459
2460 /*
2461 * synchronize mapping with software data structures
2462 *
2463 * This interface is currently only used by the working set monitor
2464 * driver.
2465 */
2466 /*ARGSUSED*/
2467 void
2468 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2469 {
2470 uintptr_t vaddr = (uintptr_t)addr;
2471 uintptr_t eaddr = vaddr + len;
2472 htable_t *ht = NULL;
2473 uint_t entry;
2474 x86pte_t pte;
2475 x86pte_t save_pte;
2476 x86pte_t new;
2477 page_t *pp;
2478
2479 ASSERT(!IN_VA_HOLE(vaddr));
2480 ASSERT(IS_PAGEALIGNED(vaddr));
2481 ASSERT(IS_PAGEALIGNED(eaddr));
2482 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2483
2484 XPV_DISALLOW_MIGRATE();
2485 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2486 try_again:
2487 pte = htable_walk(hat, &ht, &vaddr, eaddr);
2488 if (ht == NULL)
2489 break;
2490 entry = htable_va2entry(vaddr, ht);
2491
2492 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2493 PTE_GET(pte, PT_REF | PT_MOD) == 0)
2494 continue;
2495
2496 /*
2497 * We need to acquire the mapping list lock to protect
2498 * against hat_pageunload(), hat_unload(), etc.
2499 */
2500 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2501 if (pp == NULL)
2502 break;
2503 x86_hm_enter(pp);
2504 save_pte = pte;
2505 pte = x86pte_get(ht, entry);
2506 if (pte != save_pte) {
2507 x86_hm_exit(pp);
2508 goto try_again;
2509 }
2510 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2511 PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2512 x86_hm_exit(pp);
2513 continue;
2514 }
2515
2516 /*
2517 * Need to clear ref or mod bits. We may compete with
2518 * hardware updating the R/M bits and have to try again.
2519 */
2520 if (flags == HAT_SYNC_ZERORM) {
2521 new = pte;
2522 PTE_CLR(new, PT_REF | PT_MOD);
2523 pte = hati_update_pte(ht, entry, pte, new);
2524 if (pte != 0) {
2525 x86_hm_exit(pp);
2526 goto try_again;
2527 }
2528 } else {
2529 /*
2530 * sync the PTE to the page_t
2531 */
2532 hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2533 }
2534 x86_hm_exit(pp);
2535 }
2536 if (ht)
2537 htable_release(ht);
2538 XPV_ALLOW_MIGRATE();
2539 }
2540
2541 /*
2542 * void hat_map(hat, addr, len, flags)
2543 */
2544 /*ARGSUSED*/
2545 void
2546 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2547 {
2548 /* does nothing */
2549 }
2550
2551 /*
2552 * uint_t hat_getattr(hat, addr, *attr)
2553 * returns attr for <hat,addr> in *attr. returns 0 if there was a
2554 * mapping and *attr is valid, nonzero if there was no mapping and
2555 * *attr is not valid.
2556 */
2557 uint_t
2558 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2559 {
2560 uintptr_t vaddr = ALIGN2PAGE(addr);
2561 htable_t *ht = NULL;
2562 x86pte_t pte;
2563
2564 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2565
2566 if (IN_VA_HOLE(vaddr))
2567 return ((uint_t)-1);
2568
2569 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2570 if (ht == NULL)
2571 return ((uint_t)-1);
2572
2573 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2574 htable_release(ht);
2575 return ((uint_t)-1);
2576 }
2577
2578 *attr = PROT_READ;
2579 if (PTE_GET(pte, PT_WRITABLE))
2580 *attr |= PROT_WRITE;
2581 if (PTE_GET(pte, PT_USER))
2582 *attr |= PROT_USER;
2583 if (!PTE_GET(pte, mmu.pt_nx))
2584 *attr |= PROT_EXEC;
2585 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2586 *attr |= HAT_NOSYNC;
2587 htable_release(ht);
2588 return (0);
2589 }
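
/*
 * Illustrative sketch (hypothetical caller) of the return convention
 * documented above: 0 means *attr is valid, nonzero means no mapping.
 *
 *	uint_t attr;
 *
 *	if (hat_getattr(as->a_hat, addr, &attr) == 0 &&
 *	    (attr & PROT_WRITE) != 0)
 *		...the existing mapping is writable...
 */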
2590
2591 /*
2592 * hat_updateattr() applies the given attribute change to an existing mapping
2593 */
2594 #define HAT_LOAD_ATTR 1
2595 #define HAT_SET_ATTR 2
2596 #define HAT_CLR_ATTR 3
2597
2598 static void
2599 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2600 {
2601 uintptr_t vaddr = (uintptr_t)addr;
2602 uintptr_t eaddr = (uintptr_t)addr + len;
2603 htable_t *ht = NULL;
2604 uint_t entry;
2605 x86pte_t oldpte, newpte;
2606 page_t *pp;
2607
2608 XPV_DISALLOW_MIGRATE();
2609 ASSERT(IS_PAGEALIGNED(vaddr));
2610 ASSERT(IS_PAGEALIGNED(eaddr));
2611 ASSERT(hat == kas.a_hat ||
2612 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2613 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2614 try_again:
2615 oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2616 if (ht == NULL)
2617 break;
2618 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2619 continue;
2620
2621 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2622 if (pp == NULL)
2623 continue;
2624 x86_hm_enter(pp);
2625
2626 newpte = oldpte;
2627 /*
2628 * We found a page table entry in the desired range,
2629 * figure out the new attributes.
2630 */
2631 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2632 if ((attr & PROT_WRITE) &&
2633 !PTE_GET(oldpte, PT_WRITABLE))
2634 newpte |= PT_WRITABLE;
2635
2636 if ((attr & HAT_NOSYNC) &&
2637 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2638 newpte |= PT_NOSYNC;
2639
2640 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2641 newpte &= ~mmu.pt_nx;
2642 }
2643
2644 if (what == HAT_LOAD_ATTR) {
2645 if (!(attr & PROT_WRITE) &&
2646 PTE_GET(oldpte, PT_WRITABLE))
2647 newpte &= ~PT_WRITABLE;
2648
2649 if (!(attr & HAT_NOSYNC) &&
2650 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2651 newpte &= ~PT_SOFTWARE;
2652
2653 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2654 newpte |= mmu.pt_nx;
2655 }
2656
2657 if (what == HAT_CLR_ATTR) {
2658 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2659 newpte &= ~PT_WRITABLE;
2660
2661 if ((attr & HAT_NOSYNC) &&
2662 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2663 newpte &= ~PT_SOFTWARE;
2664
2665 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2666 newpte |= mmu.pt_nx;
2667 }
2668
2669 /*
2670 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2671 * x86pte_set() depends on this.
2672 */
2673 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2674 newpte |= PT_REF | PT_MOD;
2675
2676 /*
2677 * what about PROT_READ or others? this code only handles:
2678 * EXEC, WRITE, NOSYNC
2679 */
2680
2681 /*
2682 * If new PTE really changed, update the table.
2683 */
2684 if (newpte != oldpte) {
2685 entry = htable_va2entry(vaddr, ht);
2686 oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2687 if (oldpte != 0) {
2688 x86_hm_exit(pp);
2689 goto try_again;
2690 }
2691 }
2692 x86_hm_exit(pp);
2693 }
2694 if (ht)
2695 htable_release(ht);
2696 XPV_ALLOW_MIGRATE();
2697 }
2698
2699 /*
2700 * Various wrappers for hat_updateattr()
2701 */
2702 void
2703 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2704 {
2705 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2706 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2707 }
2708
2709 void
2710 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2711 {
2712 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2713 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2714 }
2715
2716 void
2717 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2718 {
2719 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2720 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2721 }
2722
2723 void
2724 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2725 {
2726 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2727 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2728 }
2729
2730 /*
2731 * size_t hat_getpagesize(hat, addr)
2732  *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
2733 * no mapping. This is an advisory call.
2734 */
2735 ssize_t
2736 hat_getpagesize(hat_t *hat, caddr_t addr)
2737 {
2738 uintptr_t vaddr = ALIGN2PAGE(addr);
2739 htable_t *ht;
2740 size_t pagesize;
2741
2742 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2743 if (IN_VA_HOLE(vaddr))
2744 return (-1);
2745 ht = htable_getpage(hat, vaddr, NULL);
2746 if (ht == NULL)
2747 return (-1);
2748 pagesize = LEVEL_SIZE(ht->ht_level);
2749 htable_release(ht);
2750 return (pagesize);
2751 }
2752
2753
2754
2755 /*
2756 * pfn_t hat_getpfnum(hat, addr)
2757 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2758 */
2759 pfn_t
2760 hat_getpfnum(hat_t *hat, caddr_t addr)
2761 {
2762 uintptr_t vaddr = ALIGN2PAGE(addr);
2763 htable_t *ht;
2764 uint_t entry;
2765 pfn_t pfn = PFN_INVALID;
2766
2767 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2768 if (khat_running == 0)
2769 return (PFN_INVALID);
2770
2771 if (IN_VA_HOLE(vaddr))
2772 return (PFN_INVALID);
2773
2774 XPV_DISALLOW_MIGRATE();
2775 /*
2776 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2777 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2778 * this up.
2779 */
2780 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2781 x86pte_t pte;
2782 pgcnt_t pg_index;
2783
2784 pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2785 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2786 if (PTE_ISVALID(pte))
2787 /*LINTED [use of constant 0 causes a lint warning] */
2788 pfn = PTE2PFN(pte, 0);
2789 XPV_ALLOW_MIGRATE();
2790 return (pfn);
2791 }
2792
2793 ht = htable_getpage(hat, vaddr, &entry);
2794 if (ht == NULL) {
2795 XPV_ALLOW_MIGRATE();
2796 return (PFN_INVALID);
2797 }
2798 ASSERT(vaddr >= ht->ht_vaddr);
2799 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2800 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2801 if (ht->ht_level > 0)
2802 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2803 htable_release(ht);
2804 XPV_ALLOW_MIGRATE();
2805 return (pfn);
2806 }
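
/*
 * Illustrative sketch (hypothetical caller): translating a kernel
 * virtual address to a physical address. The offset arithmetic is an
 * assumed typical usage, not an interface defined here:
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, va);
 *
 *	if (pfn != PFN_INVALID)
 *		pa = pfn_to_pa(pfn) + ((uintptr_t)va & MMU_PAGEOFFSET);
 */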
2807
2808 /*
2809 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
2810 * Use hat_getpfnum(kas.a_hat, ...) instead.
2811 *
2812 * We'd like to return PFN_INVALID if the mappings have underlying page_t's
2813 * but can't right now due to the fact that some software has grown to use
2814 * this interface incorrectly. So for now when the interface is misused,
2815 * return a warning to the user that in the future it won't work in the
2816 * way they're abusing it, and carry on.
2817 *
2818 * Note that hat_getkpfnum() is never supported on amd64.
2819 */
2820 #if !defined(__amd64)
2821 pfn_t
2822 hat_getkpfnum(caddr_t addr)
2823 {
2824 pfn_t pfn;
2825 int badcaller = 0;
2826
2827 if (khat_running == 0)
2828 panic("hat_getkpfnum(): called too early\n");
2829 if ((uintptr_t)addr < kernelbase)
2830 return (PFN_INVALID);
2831
2832 XPV_DISALLOW_MIGRATE();
2833 if (segkpm && IS_KPM_ADDR(addr)) {
2834 badcaller = 1;
2835 pfn = hat_kpm_va2pfn(addr);
2836 } else {
2837 pfn = hat_getpfnum(kas.a_hat, addr);
2838 badcaller = pf_is_memory(pfn);
2839 }
2840
2841 if (badcaller)
2842 hat_getkpfnum_badcall(caller());
2843 XPV_ALLOW_MIGRATE();
2844 return (pfn);
2845 }
2846 #endif /* __amd64 */
2847
2848 /*
2849 * int hat_probe(hat, addr)
2850  *	return 0 if no valid mapping is present. A faster version
2851  *	of hat_getattr() on certain architectures.
2852 */
2853 int
2854 hat_probe(hat_t *hat, caddr_t addr)
2855 {
2856 uintptr_t vaddr = ALIGN2PAGE(addr);
2857 uint_t entry;
2858 htable_t *ht;
2859 pgcnt_t pg_off;
2860
2861 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2862 ASSERT(hat == kas.a_hat ||
2863 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2864 if (IN_VA_HOLE(vaddr))
2865 return (0);
2866
2867 /*
2868 * Most common use of hat_probe is from segmap. We special case it
2869 * for performance.
2870 */
2871 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2872 pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2873 if (mmu.pae_hat)
2874 return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2875 else
2876 return (PTE_ISVALID(
2877 ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2878 }
2879
2880 ht = htable_getpage(hat, vaddr, &entry);
2881 htable_release(ht);
2882 return (ht != NULL);
2883 }
2884
2885 /*
2886 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2887 */
2888 static int
2889 is_it_dism(hat_t *hat, caddr_t va)
2890 {
2891 struct seg *seg;
2892 struct shm_data *shmd;
2893 struct spt_data *sptd;
2894
2895 seg = as_findseg(hat->hat_as, va, 0);
2896 ASSERT(seg != NULL);
2897 ASSERT(seg->s_base <= va);
2898 shmd = (struct shm_data *)seg->s_data;
2899 ASSERT(shmd != NULL);
2900 sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2901 ASSERT(sptd != NULL);
2902 if (sptd->spt_flags & SHM_PAGEABLE)
2903 return (1);
2904 return (0);
2905 }
2906
2907 /*
2908 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2909 * except that we use the ism_hat's existing mappings to determine the pages
2910 * and protections to use for this hat. If we find a full properly aligned
2911 * and sized pagetable, we will attempt to share the pagetable itself.
2912 */
2913 /*ARGSUSED*/
2914 int
2915 hat_share(
2916 hat_t *hat,
2917 caddr_t addr,
2918 hat_t *ism_hat,
2919 caddr_t src_addr,
2920 size_t len, /* almost useless value, see below.. */
2921 uint_t ismszc)
2922 {
2923 uintptr_t vaddr_start = (uintptr_t)addr;
2924 uintptr_t vaddr;
2925 uintptr_t eaddr = vaddr_start + len;
2926 uintptr_t ism_addr_start = (uintptr_t)src_addr;
2927 uintptr_t ism_addr = ism_addr_start;
2928 uintptr_t e_ism_addr = ism_addr + len;
2929 htable_t *ism_ht = NULL;
2930 htable_t *ht;
2931 x86pte_t pte;
2932 page_t *pp;
2933 pfn_t pfn;
2934 level_t l;
2935 pgcnt_t pgcnt;
2936 uint_t prot;
2937 int is_dism;
2938 int flags;
2939
2940 /*
2941 * We might be asked to share an empty DISM hat by as_dup()
2942 */
2943 ASSERT(hat != kas.a_hat);
2944 ASSERT(eaddr <= _userlimit);
2945 if (!(ism_hat->hat_flags & HAT_SHARED)) {
2946 ASSERT(hat_get_mapped_size(ism_hat) == 0);
2947 return (0);
2948 }
2949 XPV_DISALLOW_MIGRATE();
2950
2951 /*
2952 * The SPT segment driver often passes us a size larger than there are
2953 * valid mappings. That's because it rounds the segment size up to a
2954 * large pagesize, even if the actual memory mapped by ism_hat is less.
2955 */
2956 ASSERT(IS_PAGEALIGNED(vaddr_start));
2957 ASSERT(IS_PAGEALIGNED(ism_addr_start));
2958 ASSERT(ism_hat->hat_flags & HAT_SHARED);
2959 is_dism = is_it_dism(hat, addr);
2960 while (ism_addr < e_ism_addr) {
2961 /*
2962 * use htable_walk to get the next valid ISM mapping
2963 */
2964 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2965 if (ism_ht == NULL)
2966 break;
2967
2968 /*
2969 * First check to see if we already share the page table.
2970 */
2971 l = ism_ht->ht_level;
2972 vaddr = vaddr_start + (ism_addr - ism_addr_start);
2973 ht = htable_lookup(hat, vaddr, l);
2974 if (ht != NULL) {
2975 if (ht->ht_flags & HTABLE_SHARED_PFN)
2976 goto shared;
2977 htable_release(ht);
2978 goto not_shared;
2979 }
2980
2981 /*
2982 * Can't ever share top table.
2983 */
2984 if (l == mmu.max_level)
2985 goto not_shared;
2986
2987 /*
2988 * Avoid level mismatches later due to DISM faults.
2989 */
2990 if (is_dism && l > 0)
2991 goto not_shared;
2992
2993 /*
2994 * addresses and lengths must align
2995 * table must be fully populated
2996 * no lower level page tables
2997 */
2998 if (ism_addr != ism_ht->ht_vaddr ||
2999 (vaddr & LEVEL_OFFSET(l + 1)) != 0)
3000 goto not_shared;
3001
3002 /*
3003 * The range of address space must cover a full table.
3004 */
3005 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
3006 goto not_shared;
3007
3008 /*
3009 * All entries in the ISM page table must be leaf PTEs.
3010 */
3011 if (l > 0) {
3012 int e;
3013
3014 /*
3015 * We know the 0th is from htable_walk() above.
3016 */
3017 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
3018 x86pte_t pte;
3019 pte = x86pte_get(ism_ht, e);
3020 if (!PTE_ISPAGE(pte, l))
3021 goto not_shared;
3022 }
3023 }
3024
3025 /*
3026 * share the page table
3027 */
3028 ht = htable_create(hat, vaddr, l, ism_ht);
3029 shared:
3030 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
3031 ASSERT(ht->ht_shares == ism_ht);
3032 hat->hat_ism_pgcnt +=
3033 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
3034 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3035 ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
3036 htable_release(ht);
3037 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3038 htable_release(ism_ht);
3039 ism_ht = NULL;
3040 continue;
3041
3042 not_shared:
3043 /*
3044 * Unable to share the page table. Instead we will
3045 * create new mappings from the values in the ISM mappings.
3046 * Figure out what level size mappings to use;
3047 */
3048 for (l = ism_ht->ht_level; l > 0; --l) {
3049 if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3050 (vaddr & LEVEL_OFFSET(l)) == 0)
3051 break;
3052 }
3053
3054 /*
3055 * The ISM mapping might be larger than the share area,
3056 * be careful to truncate it if needed.
3057 */
3058 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3059 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3060 } else {
3061 pgcnt = mmu_btop(eaddr - vaddr);
3062 l = 0;
3063 }
3064
3065 pfn = PTE2PFN(pte, ism_ht->ht_level);
3066 ASSERT(pfn != PFN_INVALID);
3067 while (pgcnt > 0) {
3068 /*
3069 * Make a new pte for the PFN for this level.
3070 * Copy protections for the pte from the ISM pte.
3071 */
3072 pp = page_numtopp_nolock(pfn);
3073 ASSERT(pp != NULL);
3074
3075 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3076 if (PTE_GET(pte, PT_WRITABLE))
3077 prot |= PROT_WRITE;
3078 if (!PTE_GET(pte, PT_NX))
3079 prot |= PROT_EXEC;
3080
3081 flags = HAT_LOAD;
3082 if (!is_dism)
3083 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3084 while (hati_load_common(hat, vaddr, pp, prot, flags,
3085 l, pfn) != 0) {
3086 if (l == 0)
3087 panic("hati_load_common() failure");
3088 --l;
3089 }
3090
3091 vaddr += LEVEL_SIZE(l);
3092 ism_addr += LEVEL_SIZE(l);
3093 pfn += mmu_btop(LEVEL_SIZE(l));
3094 pgcnt -= mmu_btop(LEVEL_SIZE(l));
3095 }
3096 }
3097 if (ism_ht != NULL)
3098 htable_release(ism_ht);
3099 XPV_ALLOW_MIGRATE();
3100 return (0);
3101 }
3102
3103
3104 /*
3105 * hat_unshare() is similar to hat_unload_callback(), but
3106 * we have to look for empty shared pagetables. Note that
3107 * hat_unshare() is always invoked against an entire segment.
3108 */
3109 /*ARGSUSED*/
3110 void
3111 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3112 {
3113 uint64_t vaddr = (uintptr_t)addr;
3114 uintptr_t eaddr = vaddr + len;
3115 htable_t *ht = NULL;
3116 uint_t need_demaps = 0;
3117 int flags = HAT_UNLOAD_UNMAP;
3118 level_t l;
3119
3120 ASSERT(hat != kas.a_hat);
3121 ASSERT(eaddr <= _userlimit);
3122 ASSERT(IS_PAGEALIGNED(vaddr));
3123 ASSERT(IS_PAGEALIGNED(eaddr));
3124 XPV_DISALLOW_MIGRATE();
3125
3126 /*
3127 * First go through and remove any shared pagetables.
3128 *
3129 * Note that it's ok to delay the TLB shootdown till the entire range is
3130 * finished, because if hat_pageunload() were to unload a shared
3131 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
3132 */
3133 l = mmu.max_page_level;
3134 if (l == mmu.max_level)
3135 --l;
3136 for (; l >= 0; --l) {
3137 for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3138 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3139 ASSERT(!IN_VA_HOLE(vaddr));
3140 /*
3141 * find a pagetable that maps the current address
3142 */
3143 ht = htable_lookup(hat, vaddr, l);
3144 if (ht == NULL)
3145 continue;
3146 if (ht->ht_flags & HTABLE_SHARED_PFN) {
3147 /*
3148 * clear page count, set valid_cnt to 0,
3149 * let htable_release() finish the job
3150 */
3151 hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3152 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3153 ht->ht_valid_cnt = 0;
3154 need_demaps = 1;
3155 }
3156 htable_release(ht);
3157 }
3158 }
3159
3160 /*
3161 * flush the TLBs - since we're probably dealing with MANY mappings
3162 * we do just one CR3 reload.
3163 */
3164 if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3165 hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3166
3167 /*
3168 * Now go back and clean up any unaligned mappings that
3169 * couldn't share pagetables.
3170 */
3171 if (!is_it_dism(hat, addr))
3172 flags |= HAT_UNLOAD_UNLOCK;
3173 hat_unload(hat, addr, len, flags);
3174 XPV_ALLOW_MIGRATE();
3175 }
3176
3177
3178 /*
3179 * hat_reserve() does nothing
3180 */
3181 /*ARGSUSED*/
3182 void
3183 hat_reserve(struct as *as, caddr_t addr, size_t len)
3184 {
3185 }
3186
3187
3188 /*
3189 * Called when all mappings to a page should have write permission removed.
3190 * Mostly stolen from hat_pagesync()
3191 */
3192 static void
3193 hati_page_clrwrt(struct page *pp)
3194 {
3195 hment_t *hm = NULL;
3196 htable_t *ht;
3197 uint_t entry;
3198 x86pte_t old;
3199 x86pte_t new;
3200 uint_t pszc = 0;
3201
3202 XPV_DISALLOW_MIGRATE();
3203 next_size:
3204 /*
3205 * walk thru the mapping list clearing write permission
3206 */
3207 x86_hm_enter(pp);
3208 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3209 if (ht->ht_level < pszc)
3210 continue;
3211 old = x86pte_get(ht, entry);
3212
3213 for (;;) {
3214 /*
3215 * Is this mapping of interest?
3216 */
3217 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3218 PTE_GET(old, PT_WRITABLE) == 0)
3219 break;
3220
3221 /*
3222 * Clear ref/mod writable bits. This requires cross
3223 * calls to ensure any executing TLBs see cleared bits.
3224 */
3225 new = old;
3226 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3227 old = hati_update_pte(ht, entry, old, new);
3228 if (old != 0)
3229 continue;
3230
3231 break;
3232 }
3233 }
3234 x86_hm_exit(pp);
3235 while (pszc < pp->p_szc) {
3236 page_t *tpp;
3237 pszc++;
3238 tpp = PP_GROUPLEADER(pp, pszc);
3239 if (pp != tpp) {
3240 pp = tpp;
3241 goto next_size;
3242 }
3243 }
3244 XPV_ALLOW_MIGRATE();
3245 }
3246
3247 /*
3248 * void hat_page_setattr(pp, flag)
3249 * void hat_page_clrattr(pp, flag)
3250 * used to set/clr ref/mod bits.
3251 */
3252 void
3253 hat_page_setattr(struct page *pp, uint_t flag)
3254 {
3255 vnode_t *vp = pp->p_vnode;
3256 kmutex_t *vphm = NULL;
3257 page_t **listp;
3258 int noshuffle;
3259
3260 noshuffle = flag & P_NSH;
3261 flag &= ~P_NSH;
3262
3263 if (PP_GETRM(pp, flag) == flag)
3264 return;
3265
3266 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3267 !noshuffle) {
3268 vphm = page_vnode_mutex(vp);
3269 mutex_enter(vphm);
3270 }
3271
3272 PP_SETRM(pp, flag);
3273
3274 if (vphm != NULL) {
3275
3276 /*
3277 * Some File Systems examine v_pages for NULL w/o
3278 * grabbing the vphm mutex. Must not let it become NULL when
3279 * pp is the only page on the list.
3280 */
3281 if (pp->p_vpnext != pp) {
3282 page_vpsub(&vp->v_pages, pp);
3283 if (vp->v_pages != NULL)
3284 listp = &vp->v_pages->p_vpprev->p_vpnext;
3285 else
3286 listp = &vp->v_pages;
3287 page_vpadd(listp, pp);
3288 }
3289 mutex_exit(vphm);
3290 }
3291 }
3292
3293 void
3294 hat_page_clrattr(struct page *pp, uint_t flag)
3295 {
3296 vnode_t *vp = pp->p_vnode;
3297 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3298
3299 /*
3300 * Caller is expected to hold page's io lock for VMODSORT to work
3301 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3302 * bit is cleared.
3303 	 * We don't assert this here, to avoid tripping some existing third
3304 	 * party code. The dirty page is moved back to the top of the v_page
3305 	 * list after IO is done in pvn_write_done().
3306 */
3307 PP_CLRRM(pp, flag);
3308
3309 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3310
3311 /*
3312 * VMODSORT works by removing write permissions and getting
3313 * a fault when a page is made dirty. At this point
3314 * we need to remove write permission from all mappings
3315 * to this page.
3316 */
3317 hati_page_clrwrt(pp);
3318 }
3319 }
3320
3321 /*
3322  * If flag is specified, returns 0 if the attribute is disabled
3323  * and nonzero if enabled. If flag specifies multiple attributes
3324 * then returns 0 if ALL attributes are disabled. This is an advisory
3325 * call.
3326 */
3327 uint_t
3328 hat_page_getattr(struct page *pp, uint_t flag)
3329 {
3330 return (PP_GETRM(pp, flag));
3331 }
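
/*
 * Illustrative sketch (hypothetical caller): a pageout-style consumer of
 * the attribute routines above. Note these only touch the software
 * p_nrm bits; hat_pagesync() further below pulls fresh ref/mod state
 * out of the pagetables first.
 *
 *	if (hat_page_getattr(pp, P_REF | P_MOD) != 0)
 *		...page was referenced or modified...
 *	hat_page_clrattr(pp, P_REF);
 */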
3332
3333
3334 /*
3335 * common code used by hat_pageunload() and hment_steal()
3336 */
3337 hment_t *
3338 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3339 {
3340 x86pte_t old_pte;
3341 pfn_t pfn = pp->p_pagenum;
3342 hment_t *hm;
3343
3344 /*
3345 * We need to acquire a hold on the htable in order to
3346 * do the invalidate. We know the htable must exist, since
3347 	 * unmaps don't release the htable until after removing any
3348 * hment. Having x86_hm_enter() keeps that from proceeding.
3349 */
3350 htable_acquire(ht);
3351
3352 /*
3353 * Invalidate the PTE and remove the hment.
3354 */
3355 old_pte = x86pte_inval(ht, entry, 0, NULL);
3356 if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3357 panic("x86pte_inval() failure found PTE = " FMT_PTE
3358 " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3359 old_pte, pfn, (uintptr_t)ht, entry);
3360 }
3361
3362 /*
3363 * Clean up all the htable information for this mapping
3364 */
3365 ASSERT(ht->ht_valid_cnt > 0);
3366 HTABLE_DEC(ht->ht_valid_cnt);
3367 PGCNT_DEC(ht->ht_hat, ht->ht_level);
3368
3369 /*
3370 * sync ref/mod bits to the page_t
3371 */
3372 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3373 hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3374
3375 /*
3376 * Remove the mapping list entry for this page.
3377 */
3378 hm = hment_remove(pp, ht, entry);
3379
3380 /*
3381 * drop the mapping list lock so that we might free the
3382 * hment and htable.
3383 */
3384 x86_hm_exit(pp);
3385 htable_release(ht);
3386 return (hm);
3387 }
3388
3389 extern int vpm_enable;
3390 /*
3391 * Unload all translations to a page. If the page is a subpage of a large
3392 * page, the large page mappings are also removed.
3393 *
3394 * The forceflags are unused.
3395 */
3396
3397 /*ARGSUSED*/
3398 static int
3399 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3400 {
3401 page_t *cur_pp = pp;
3402 hment_t *hm;
3403 hment_t *prev;
3404 htable_t *ht;
3405 uint_t entry;
3406 level_t level;
3407
3408 XPV_DISALLOW_MIGRATE();
3409
3410 /*
3411 * prevent recursion due to kmem_free()
3412 */
3413 ++curthread->t_hatdepth;
3414 ASSERT(curthread->t_hatdepth < 16);
3415
3416 #if defined(__amd64)
3417 /*
3418 * clear the vpm ref.
3419 */
3420 if (vpm_enable) {
3421 pp->p_vpmref = 0;
3422 }
3423 #endif
3424 /*
3425 * The loop with next_size handles pages with multiple pagesize mappings
3426 */
3427 next_size:
3428 for (;;) {
3429
3430 /*
3431 * Get a mapping list entry
3432 */
3433 x86_hm_enter(cur_pp);
3434 for (prev = NULL; ; prev = hm) {
3435 hm = hment_walk(cur_pp, &ht, &entry, prev);
3436 if (hm == NULL) {
3437 x86_hm_exit(cur_pp);
3438
3439 /*
3440 * If not part of a larger page, we're done.
3441 */
3442 if (cur_pp->p_szc <= pg_szcd) {
3443 ASSERT(curthread->t_hatdepth > 0);
3444 --curthread->t_hatdepth;
3445 XPV_ALLOW_MIGRATE();
3446 return (0);
3447 }
3448
3449 /*
3450 * Else check the next larger page size.
3451 				 * hat_page_demote() may decrease p_szc,
3452 				 * but that's ok; we'll just take an extra
3453 				 * trip, discover there are no larger
3454 				 * mappings, and return.
3455 */
3456 ++pg_szcd;
3457 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3458 goto next_size;
3459 }
3460
3461 /*
3462 * If this mapping size matches, remove it.
3463 */
3464 level = ht->ht_level;
3465 if (level == pg_szcd)
3466 break;
3467 }
3468
3469 /*
3470 * Remove the mapping list entry for this page.
3471 * Note this does the x86_hm_exit() for us.
3472 */
3473 hm = hati_page_unmap(cur_pp, ht, entry);
3474 if (hm != NULL)
3475 hment_free(hm);
3476 }
3477 }
3478
3479 int
3480 hat_pageunload(struct page *pp, uint_t forceflag)
3481 {
3482 ASSERT(PAGE_EXCL(pp));
3483 return (hati_pageunload(pp, 0, forceflag));
3484 }
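
/*
 * Illustrative sketch (hypothetical caller): the page must be held
 * EXCL-locked across hat_pageunload(), as asserted above, e.g.
 *
 *	if (page_tryupgrade(pp)) {
 *		(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
 *		page_downgrade(pp);
 *	}
 *
 * page_tryupgrade()/page_downgrade() are assumed VM-layer helpers shown
 * only to emphasize the locking requirement.
 */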
3485
3486 /*
3487 * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3488 * page level that included pp.
3489 *
3490 * pp must be locked EXCL. Even though no other constituent pages are locked
3491 * it's legal to unload large mappings to pp because all constituent pages of
3492  * large locked mappings have to be locked SHARED. Therefore, if we have an
3493  * EXCL lock on one of the constituent pages, none of the large mappings to
3494  * pp are locked.
3495 *
3496 * Change (always decrease) p_szc field starting from the last constituent
3497 * page and ending with root constituent page so that root's pszc always shows
3498 * the area where hat_page_demote() may be active.
3499 *
3500 * This mechanism is only used for file system pages where it's not always
3501 * possible to get EXCL locks on all constituent pages to demote the size code
3502 * (as is done for anonymous or kernel large pages).
3503 */
3504 void
3505 hat_page_demote(page_t *pp)
3506 {
3507 uint_t pszc;
3508 uint_t rszc;
3509 uint_t szc;
3510 page_t *rootpp;
3511 page_t *firstpp;
3512 page_t *lastpp;
3513 pgcnt_t pgcnt;
3514
3515 ASSERT(PAGE_EXCL(pp));
3516 ASSERT(!PP_ISFREE(pp));
3517 ASSERT(page_szc_lock_assert(pp));
3518
3519 if (pp->p_szc == 0)
3520 return;
3521
3522 rootpp = PP_GROUPLEADER(pp, 1);
3523 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3524
3525 /*
3526 * all large mappings to pp are gone
3527 * and no new can be setup since pp is locked exclusively.
3528 *
3529 * Lock the root to make sure there's only one hat_page_demote()
3530 * outstanding within the area of this root's pszc.
3531 *
3532 * Second potential hat_page_demote() is already eliminated by upper
3533 * VM layer via page_szc_lock() but we don't rely on it and use our
3534 * own locking (so that upper layer locking can be changed without
3535 * assumptions that hat depends on upper layer VM to prevent multiple
3536 * hat_page_demote() to be issued simultaneously to the same large
3537 * page).
3538 */
3539 again:
3540 pszc = pp->p_szc;
3541 if (pszc == 0)
3542 return;
3543 rootpp = PP_GROUPLEADER(pp, pszc);
3544 x86_hm_enter(rootpp);
3545 /*
3546 * If root's p_szc is different from pszc we raced with another
3547 * hat_page_demote(). Drop the lock and try to find the root again.
3548 * If root's p_szc is greater than pszc previous hat_page_demote() is
3549 * not done yet. Take and release mlist lock of root's root to wait
3550 * for previous hat_page_demote() to complete.
3551 */
3552 if ((rszc = rootpp->p_szc) != pszc) {
3553 x86_hm_exit(rootpp);
3554 if (rszc > pszc) {
3555 /* p_szc of a locked non free page can't increase */
3556 ASSERT(pp != rootpp);
3557
3558 rootpp = PP_GROUPLEADER(rootpp, rszc);
3559 x86_hm_enter(rootpp);
3560 x86_hm_exit(rootpp);
3561 }
3562 goto again;
3563 }
3564 ASSERT(pp->p_szc == pszc);
3565
3566 /*
3567 * Decrement by 1 p_szc of every constituent page of a region that
3568 * covered pp. For example if original szc is 3 it gets changed to 2
3569 * everywhere except in region 2 that covered pp. Region 2 that
3570 * covered pp gets demoted to 1 everywhere except in region 1 that
3571 * covered pp. The region 1 that covered pp is demoted to region
3572 * 0. It's done this way because from region 3 we removed level 3
3573 * mappings, from region 2 that covered pp we removed level 2 mappings
3574 * and from region 1 that covered pp we removed level 1 mappings. All
3575  * changes are done from high pfn's to low pfn's so that roots
3576  * are changed last, allowing one to know the largest region where
3577  * hat_page_demote() is still active by only looking at the root page.
3578 *
3579 * This algorithm is implemented in 2 while loops. First loop changes
3580 * p_szc of pages to the right of pp's level 1 region and second
3581 * loop changes p_szc of pages of level 1 region that covers pp
3582 * and all pages to the left of level 1 region that covers pp.
3583 * In the first loop p_szc keeps dropping with every iteration
3584 * and in the second loop it keeps increasing with every iteration.
3585 *
3586 * First loop description: Demote pages to the right of pp outside of
3587 * level 1 region that covers pp. In every iteration of the while
3588 * loop below find the last page of szc region and the first page of
3589 * (szc - 1) region that is immediately to the right of (szc - 1)
3590 * region that covers pp. From last such page to first such page
3591 * change every page's szc to szc - 1. Decrement szc and continue
3592 * looping until szc is 1. If pp belongs to the last (szc - 1) region
3593 * of szc region skip to the next iteration.
3594 */
3595 szc = pszc;
3596 while (szc > 1) {
3597 lastpp = PP_GROUPLEADER(pp, szc);
3598 pgcnt = page_get_pagecnt(szc);
3599 lastpp += pgcnt - 1;
3600 firstpp = PP_GROUPLEADER(pp, (szc - 1));
3601 pgcnt = page_get_pagecnt(szc - 1);
3602 if (lastpp - firstpp < pgcnt) {
3603 szc--;
3604 continue;
3605 }
3606 firstpp += pgcnt;
3607 while (lastpp != firstpp) {
3608 ASSERT(lastpp->p_szc == pszc);
3609 lastpp->p_szc = szc - 1;
3610 lastpp--;
3611 }
3612 firstpp->p_szc = szc - 1;
3613 szc--;
3614 }
3615
3616 /*
3617 * Second loop description:
3618 * First iteration changes p_szc to 0 of every
3619 * page of level 1 region that covers pp.
3620 * Subsequent iterations find last page of szc region
3621 * immediately to the left of szc region that covered pp
3622 * and first page of (szc + 1) region that covers pp.
3623 * From last to first page change p_szc of every page to szc.
3624 * Increment szc and continue looping until szc is pszc.
3625  * If pp belongs to the first szc region of the (szc + 1) region,
3626  * skip to the next iteration.
3627 *
3628 */
3629 szc = 0;
3630 while (szc < pszc) {
3631 firstpp = PP_GROUPLEADER(pp, (szc + 1));
3632 if (szc == 0) {
3633 pgcnt = page_get_pagecnt(1);
3634 lastpp = firstpp + (pgcnt - 1);
3635 } else {
3636 lastpp = PP_GROUPLEADER(pp, szc);
3637 if (firstpp == lastpp) {
3638 szc++;
3639 continue;
3640 }
3641 lastpp--;
3642 pgcnt = page_get_pagecnt(szc);
3643 }
3644 while (lastpp != firstpp) {
3645 ASSERT(lastpp->p_szc == pszc);
3646 lastpp->p_szc = szc;
3647 lastpp--;
3648 }
3649 firstpp->p_szc = szc;
3650 if (firstpp == rootpp)
3651 break;
3652 szc++;
3653 }
3654 x86_hm_exit(rootpp);
3655 }
3656
3657 /*
3658 * get hw stats from hardware into page struct and reset hw stats
3659 * returns attributes of page
3660 * Flags for hat_pagesync, hat_getstat, hat_sync
3661 *
3662 * define HAT_SYNC_ZERORM 0x01
3663 *
3664 * Additional flags for hat_pagesync
3665 *
3666 * define HAT_SYNC_STOPON_REF 0x02
3667 * define HAT_SYNC_STOPON_MOD 0x04
3668 * define HAT_SYNC_STOPON_RM 0x06
3669 * define HAT_SYNC_STOPON_SHARED 0x08
3670 */
3671 uint_t
3672 hat_pagesync(struct page *pp, uint_t flags)
3673 {
3674 hment_t *hm = NULL;
3675 htable_t *ht;
3676 uint_t entry;
3677 x86pte_t old, save_old;
3678 x86pte_t new;
3679 uchar_t nrmbits = P_REF|P_MOD|P_RO;
3680 extern ulong_t po_share;
3681 page_t *save_pp = pp;
3682 uint_t pszc = 0;
3683
3684 ASSERT(PAGE_LOCKED(pp) || panicstr);
3685
3686 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3687 return (pp->p_nrm & nrmbits);
3688
3689 if ((flags & HAT_SYNC_ZERORM) == 0) {
3690
3691 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3692 return (pp->p_nrm & nrmbits);
3693
3694 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3695 return (pp->p_nrm & nrmbits);
3696
3697 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3698 hat_page_getshare(pp) > po_share) {
3699 if (PP_ISRO(pp))
3700 PP_SETREF(pp);
3701 return (pp->p_nrm & nrmbits);
3702 }
3703 }
3704
3705 XPV_DISALLOW_MIGRATE();
3706 next_size:
3707 /*
3708 * walk thru the mapping list syncing (and clearing) ref/mod bits.
3709 */
3710 x86_hm_enter(pp);
3711 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3712 if (ht->ht_level < pszc)
3713 continue;
3714 old = x86pte_get(ht, entry);
3715 try_again:
3716
3717 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3718
3719 if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3720 continue;
3721
3722 save_old = old;
3723 if ((flags & HAT_SYNC_ZERORM) != 0) {
3724
3725 /*
3726 * Need to clear ref or mod bits. Need to demap
3727 * to make sure any executing TLBs see cleared bits.
3728 */
3729 new = old;
3730 PTE_CLR(new, PT_REF | PT_MOD);
3731 old = hati_update_pte(ht, entry, old, new);
3732 if (old != 0)
3733 goto try_again;
3734
3735 old = save_old;
3736 }
3737
3738 /*
3739 * Sync the PTE
3740 */
3741 if (!(flags & HAT_SYNC_ZERORM) &&
3742 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3743 hati_sync_pte_to_page(pp, old, ht->ht_level);
3744
3745 /*
3746 * can stop short if we found a ref'd or mod'd page
3747 */
3748 if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3749 (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3750 x86_hm_exit(pp);
3751 goto done;
3752 }
3753 }
3754 x86_hm_exit(pp);
3755 while (pszc < pp->p_szc) {
3756 page_t *tpp;
3757 pszc++;
3758 tpp = PP_GROUPLEADER(pp, pszc);
3759 if (pp != tpp) {
3760 pp = tpp;
3761 goto next_size;
3762 }
3763 }
3764 done:
3765 XPV_ALLOW_MIGRATE();
3766 return (save_pp->p_nrm & nrmbits);
3767 }
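
/*
 * Illustrative sketch (hypothetical caller): harvesting and clearing the
 * hardware ref/mod bits for a page in one call. The P_MOD consumer below
 * is an assumption; the flag semantics follow the comment block above.
 *
 *	uint_t nrm = hat_pagesync(pp, HAT_SYNC_ZERORM);
 *
 *	if (nrm & P_MOD)
 *		...page was dirty; its ref/mod bits are now cleared...
 */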
3768
3769 /*
3770 * returns approx number of mappings to this pp. A return of 0 implies
3771 * there are no mappings to the page.
3772 */
3773 ulong_t
3774 hat_page_getshare(page_t *pp)
3775 {
3776 uint_t cnt;
3777 cnt = hment_mapcnt(pp);
3778 #if defined(__amd64)
3779 if (vpm_enable && pp->p_vpmref) {
3780 cnt += 1;
3781 }
3782 #endif
3783 return (cnt);
3784 }
3785
3786 /*
3787  * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3788 * otherwise.
3789 */
3790 int
3791 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3792 {
3793 return (hat_page_getshare(pp) > sh_thresh);
3794 }
3795
3796 /*
3797 * hat_softlock isn't supported anymore
3798 */
3799 /*ARGSUSED*/
3800 faultcode_t
3801 hat_softlock(
3802 hat_t *hat,
3803 caddr_t addr,
3804 size_t *len,
3805 struct page **page_array,
3806 uint_t flags)
3807 {
3808 return (FC_NOSUPPORT);
3809 }
3810
3811
3812
3813 /*
3814 * Routine to expose supported HAT features to platform independent code.
3815 */
3816 /*ARGSUSED*/
3817 int
3818 hat_supported(enum hat_features feature, void *arg)
3819 {
3820 switch (feature) {
3821
3822 case HAT_SHARED_PT: /* this is really ISM */
3823 return (1);
3824
3825 case HAT_DYNAMIC_ISM_UNMAP:
3826 return (0);
3827
3828 case HAT_VMODSORT:
3829 return (1);
3830
3831 case HAT_SHARED_REGIONS:
3832 return (0);
3833
3834 default:
3835 panic("hat_supported() - unknown feature");
3836 }
3837 return (0);
3838 }
3839
3840 /*
3841 * Called when a thread is exiting and has been switched to the kernel AS
3842 */
3843 void
3844 hat_thread_exit(kthread_t *thd)
3845 {
3846 ASSERT(thd->t_procp->p_as == &kas);
3847 XPV_DISALLOW_MIGRATE();
3848 hat_switch(thd->t_procp->p_as->a_hat);
3849 XPV_ALLOW_MIGRATE();
3850 }
3851
3852 /*
3853 * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
3854 */
3855 /*ARGSUSED*/
3856 void
3857 hat_setup(hat_t *hat, int flags)
3858 {
3859 XPV_DISALLOW_MIGRATE();
3860 kpreempt_disable();
3861
3862 hat_switch(hat);
3863
3864 kpreempt_enable();
3865 XPV_ALLOW_MIGRATE();
3866 }
3867
3868 /*
3869 * Prepare for a CPU private mapping for the given address.
3870 *
3871 * The address can only be used from a single CPU and can be remapped
3872 * using hat_mempte_remap(). Return the address of the PTE.
3873 *
3874 * We do the htable_create() if necessary and increment the valid count so
3875 * the htable can't disappear. We also hat_devload() the page table into
3876  * the kernel so that the PTE can be accessed quickly.
3877 */
3878 hat_mempte_t
3879 hat_mempte_setup(caddr_t addr)
3880 {
3881 uintptr_t va = (uintptr_t)addr;
3882 htable_t *ht;
3883 uint_t entry;
3884 x86pte_t oldpte;
3885 hat_mempte_t p;
3886
3887 ASSERT(IS_PAGEALIGNED(va));
3888 ASSERT(!IN_VA_HOLE(va));
3889 ++curthread->t_hatdepth;
3890 XPV_DISALLOW_MIGRATE();
3891 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3892 if (ht == NULL) {
3893 ht = htable_create(kas.a_hat, va, 0, NULL);
3894 entry = htable_va2entry(va, ht);
3895 ASSERT(ht->ht_level == 0);
3896 oldpte = x86pte_get(ht, entry);
3897 }
3898 if (PTE_ISVALID(oldpte))
3899 		panic("hat_mempte_setup(): address already mapped "
3900 "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3901
3902 /*
3903 * increment ht_valid_cnt so that the pagetable can't disappear
3904 */
3905 HTABLE_INC(ht->ht_valid_cnt);
3906
3907 /*
3908 * return the PTE physical address to the caller.
3909 */
3910 htable_release(ht);
3911 XPV_ALLOW_MIGRATE();
3912 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3913 --curthread->t_hatdepth;
3914 return (p);
3915 }
3916
3917 /*
3918 * Release a CPU private mapping for the given address.
3919 * We decrement the htable valid count so it might be destroyed.
3920 */
3921 /*ARGSUSED1*/
3922 void
3923 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3924 {
3925 htable_t *ht;
3926
3927 XPV_DISALLOW_MIGRATE();
3928 /*
3929 	 * invalidate any leftover mapping and decrement the htable valid count
3930 */
3931 #ifdef __xpv
3932 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3933 UVMF_INVLPG | UVMF_LOCAL))
3934 panic("HYPERVISOR_update_va_mapping() failed");
3935 #else
3936 {
3937 x86pte_t *pteptr;
3938
3939 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3940 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3941 if (mmu.pae_hat)
3942 *pteptr = 0;
3943 else
3944 *(x86pte32_t *)pteptr = 0;
3945 mmu_tlbflush_entry(addr);
3946 x86pte_mapout();
3947 }
3948 #endif
3949
3950 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3951 if (ht == NULL)
3952 panic("hat_mempte_release(): invalid address");
3953 ASSERT(ht->ht_level == 0);
3954 HTABLE_DEC(ht->ht_valid_cnt);
3955 htable_release(ht);
3956 XPV_ALLOW_MIGRATE();
3957 }
3958
3959 /*
3960 * Apply a temporary CPU private mapping to a page. We flush the TLB only
3961 * on this CPU, so this ought to have been called with preemption disabled.
3962 */
3963 void
3964 hat_mempte_remap(
3965 pfn_t pfn,
3966 caddr_t addr,
3967 hat_mempte_t pte_pa,
3968 uint_t attr,
3969 uint_t flags)
3970 {
3971 uintptr_t va = (uintptr_t)addr;
3972 x86pte_t pte;
3973
3974 /*
3975 * Remap the given PTE to the new page's PFN. Invalidate only
3976 * on this CPU.
3977 */
3978 #ifdef DEBUG
3979 htable_t *ht;
3980 uint_t entry;
3981
3982 ASSERT(IS_PAGEALIGNED(va));
3983 ASSERT(!IN_VA_HOLE(va));
3984 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3985 ASSERT(ht != NULL);
3986 ASSERT(ht->ht_level == 0);
3987 ASSERT(ht->ht_valid_cnt > 0);
3988 ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3989 htable_release(ht);
3990 #endif
3991 XPV_DISALLOW_MIGRATE();
3992 pte = hati_mkpte(pfn, attr, 0, flags);
3993 #ifdef __xpv
3994 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
3995 panic("HYPERVISOR_update_va_mapping() failed");
3996 #else
3997 {
3998 x86pte_t *pteptr;
3999
4000 pteptr = x86pte_mapin(mmu_btop(pte_pa),
4001 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
4002 if (mmu.pae_hat)
4003 *(x86pte_t *)pteptr = pte;
4004 else
4005 *(x86pte32_t *)pteptr = (x86pte32_t)pte;
4006 mmu_tlbflush_entry(addr);
4007 x86pte_mapout();
4008 }
4009 #endif
4010 XPV_ALLOW_MIGRATE();
4011 }
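
/*
 * Illustrative sketch (hypothetical per-CPU caller) of the mempte life
 * cycle implemented by the three routines above. The attr/flags values
 * are assumptions for illustration; only the setup/remap/release
 * ordering and the "preemption disabled around remap" requirement come
 * from the code above.
 *
 *	hat_mempte_t pte_pa = hat_mempte_setup(cpu_private_va);
 *
 *	kpreempt_disable();
 *	hat_mempte_remap(pfn, cpu_private_va, pte_pa,
 *	    PROT_READ | PROT_WRITE, HAT_STORECACHING_OK);
 *	...access the page through cpu_private_va on this CPU only...
 *	kpreempt_enable();
 *
 *	hat_mempte_release(cpu_private_va, pte_pa);
 */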
4012
4013
4014
4015 /*
4016 * Hat locking functions
4017 * XXX - these two functions are currently being used by hatstats
4018 * they can be removed by using a per-as mutex for hatstats.
4019 */
4020 void
4021 hat_enter(hat_t *hat)
4022 {
4023 mutex_enter(&hat->hat_mutex);
4024 }
4025
4026 void
4027 hat_exit(hat_t *hat)
4028 {
4029 mutex_exit(&hat->hat_mutex);
4030 }
4031
4032 /*
4033 * HAT part of cpu initialization.
4034 */
4035 void
4036 hat_cpu_online(struct cpu *cpup)
4037 {
4038 if (cpup != CPU) {
4039 x86pte_cpu_init(cpup);
4040 hat_vlp_setup(cpup);
4041 }
4042 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4043 }
4044
4045 /*
4046 * HAT part of cpu deletion.
4047 * (currently, we only call this after the cpu is safely passivated.)
4048 */
4049 void
4050 hat_cpu_offline(struct cpu *cpup)
4051 {
4052 ASSERT(cpup != CPU);
4053
4054 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4055 hat_vlp_teardown(cpup);
4056 x86pte_cpu_fini(cpup);
4057 }
4058
4059 /*
4060 * Function called after all CPUs are brought online.
4061 * Used to remove low address boot mappings.
4062 */
4063 void
4064 clear_boot_mappings(uintptr_t low, uintptr_t high)
4065 {
4066 uintptr_t vaddr = low;
4067 htable_t *ht = NULL;
4068 level_t level;
4069 uint_t entry;
4070 x86pte_t pte;
4071
4072 /*
4073  * On the 1st CPU we can unload the prom mappings; basically we blow away
4074 * all virtual mappings under _userlimit.
4075 */
4076 while (vaddr < high) {
4077 pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4078 if (ht == NULL)
4079 break;
4080
4081 level = ht->ht_level;
4082 entry = htable_va2entry(vaddr, ht);
4083 ASSERT(level <= mmu.max_page_level);
4084 ASSERT(PTE_ISPAGE(pte, level));
4085
4086 /*
4087 * Unload the mapping from the page tables.
4088 */
4089 (void) x86pte_inval(ht, entry, 0, NULL);
4090 ASSERT(ht->ht_valid_cnt > 0);
4091 HTABLE_DEC(ht->ht_valid_cnt);
4092 PGCNT_DEC(ht->ht_hat, ht->ht_level);
4093
4094 vaddr += LEVEL_SIZE(ht->ht_level);
4095 }
4096 if (ht)
4097 htable_release(ht);
4098 }
4099
4100 /*
4101 * Atomically update a new translation for a single page. If the
4102 * currently installed PTE doesn't match the value we expect to find,
4103 * it's not updated and we return the PTE we found.
4104 *
4105 * If activating nosync or NOWRITE and the page was modified we need to sync
4106 * with the page_t. Also sync with page_t if clearing ref/mod bits.
4107 */
4108 static x86pte_t
4109 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4110 {
4111 page_t *pp;
4112 uint_t rm = 0;
4113 x86pte_t replaced;
4114
4115 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4116 PTE_GET(expected, PT_MOD | PT_REF) &&
4117 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4118 !PTE_GET(new, PT_MOD | PT_REF))) {
4119
4120 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4121 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4122 ASSERT(pp != NULL);
4123 if (PTE_GET(expected, PT_MOD))
4124 rm |= P_MOD;
4125 if (PTE_GET(expected, PT_REF))
4126 rm |= P_REF;
4127 PTE_CLR(new, PT_MOD | PT_REF);
4128 }
4129
4130 replaced = x86pte_update(ht, entry, expected, new);
4131 if (replaced != expected)
4132 return (replaced);
4133
4134 if (rm) {
4135 /*
4136 * sync to all constituent pages of a large page
4137 */
4138 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4139 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4140 while (pgcnt-- > 0) {
4141 /*
4142 * hat_page_demote() can't decrease
4143 * pszc below this mapping size
4144 * since large mapping existed after we
4145 * took mlist lock.
4146 */
4147 ASSERT(pp->p_szc >= ht->ht_level);
4148 hat_page_setattr(pp, rm);
4149 ++pp;
4150 }
4151 }
4152
4153 return (0);
4154 }
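
/*
 * Illustrative sketch (not part of the original source): callers of
 * hati_update_pte() treat it like a compare-and-swap and retry when the
 * installed PTE changed underneath them.  The attribute change shown
 * (clearing PT_WRITABLE) is just an example; the guard macro is never
 * defined, so this block is documentation only.
 */
#ifdef HATI_UPDATE_PTE_SKETCH
static void
hati_update_pte_retry_sketch(htable_t *ht, uint_t entry)
{
	x86pte_t old, new;

	do {
		old = x86pte_get(ht, entry);	/* re-read current PTE */
		new = old;
		PTE_CLR(new, PT_WRITABLE);	/* example modification */
	} while (hati_update_pte(ht, entry, old, new) != 0);
}
#endif	/* HATI_UPDATE_PTE_SKETCH */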
4155
4156 /* ARGSUSED */
4157 void
4158 hat_join_srd(struct hat *hat, vnode_t *evp)
4159 {
4160 }
4161
4162 /* ARGSUSED */
4163 hat_region_cookie_t
4164 hat_join_region(struct hat *hat,
4165 caddr_t r_saddr,
4166 size_t r_size,
4167 void *r_obj,
4168 u_offset_t r_objoff,
4169 uchar_t r_perm,
4170 uchar_t r_pgszc,
4171 hat_rgn_cb_func_t r_cb_function,
4172 uint_t flags)
4173 {
4174 panic("No shared region support on x86");
4175 return (HAT_INVALID_REGION_COOKIE);
4176 }
4177
4178 /* ARGSUSED */
4179 void
4180 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4181 {
4182 panic("No shared region support on x86");
4183 }
4184
4185 /* ARGSUSED */
4186 void
4187 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4188 {
4189 panic("No shared region support on x86");
4190 }
4191
4192
4193 /*
4194 * Kernel Physical Mapping (kpm) facility
4195 *
4196 * Most of the routines needed to support segkpm are almost no-ops on the
4197 * x86 platform. We map in the entire segment when it is created and leave
4198 * it mapped in, so there is no additional work required to set up and tear
4199 * down individual mappings. All of these routines were created to support
4200 * SPARC platforms that have to avoid aliasing in their virtually indexed
4201 * caches.
4202 *
4203 * Most of the routines have sanity checks in them (e.g. verifying that the
4204 * passed-in page is locked). We don't actually care about most of these
4205 * checks on x86, but we leave them in place to identify problems in the
4206 * upper levels.
4207 */
4208
4209 /*
4210 * Map in a locked page and return the vaddr.
4211 */
4212 /*ARGSUSED*/
4213 caddr_t
4214 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4215 {
4216 caddr_t vaddr;
4217
4218 #ifdef DEBUG
4219 if (kpm_enable == 0) {
4220 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4221 return ((caddr_t)NULL);
4222 }
4223
4224 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4225 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4226 return ((caddr_t)NULL);
4227 }
4228 #endif
4229
4230 vaddr = hat_kpm_page2va(pp, 1);
4231
4232 return (vaddr);
4233 }
4234
4235 /*
4236 * Mapout a locked page.
4237 */
4238 /*ARGSUSED*/
4239 void
4240 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4241 {
4242 #ifdef DEBUG
4243 if (kpm_enable == 0) {
4244 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4245 return;
4246 }
4247
4248 if (IS_KPM_ADDR(vaddr) == 0) {
4249 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4250 return;
4251 }
4252
4253 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4254 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4255 return;
4256 }
4257 #endif
4258 }
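
/*
 * Illustrative sketch (not part of the original source): the usual
 * mapin/mapout pairing on a locked page.  On x86 both calls are nearly
 * no-ops, but callers keep the pairing for portability with platforms
 * that manage kpm mappings dynamically.  The guard macro is never
 * defined, so this block is documentation only.
 */
#ifdef HAT_KPM_USAGE_SKETCH
static void
hat_kpm_zero_sketch(page_t *pp)
{
	caddr_t va;

	ASSERT(PAGE_LOCKED(pp));
	va = hat_kpm_mapin(pp, NULL);	/* NULL kpme: no reverse tracking */
	bzero(va, MMU_PAGESIZE);	/* access the page via its kpm alias */
	hat_kpm_mapout(pp, NULL, va);
}
#endif	/* HAT_KPM_USAGE_SKETCH */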
4259
4260 /*
4261 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4262 * memory addresses that are not described by a page_t. It can
4263 * also be used for normal pages that are not locked, but beware
4264 * this is dangerous - no locking is performed, so the identity of
4265 * the page could change. hat_kpm_mapin_pfn is not supported when
4266 * vac_colors > 1, because the chosen va depends on the page identity,
4267 * which could change.
4268 * The caller must only pass pfn's for valid physical addresses; violation
4269 * of this rule will cause panic.
4270 */
4271 caddr_t
4272 hat_kpm_mapin_pfn(pfn_t pfn)
4273 {
4274 caddr_t paddr, vaddr;
4275
4276 if (kpm_enable == 0)
4277 return ((caddr_t)NULL);
4278
4279 paddr = (caddr_t)ptob(pfn);
4280 vaddr = (uintptr_t)kpm_vbase + paddr;
4281
4282 return ((caddr_t)vaddr);
4283 }
4284
4285 /*ARGSUSED*/
4286 void
4287 hat_kpm_mapout_pfn(pfn_t pfn)
4288 {
4289 /* empty */
4290 }
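
/*
 * Illustrative sketch (not part of the original source): reading a word of
 * physical memory that has no page_t (e.g. a firmware table) through its
 * kpm alias.  The pfn and offset are assumed valid; the guard macro is
 * never defined, so this block is documentation only.
 */
#ifdef HAT_KPM_PFN_SKETCH
static uint32_t
hat_kpm_peek_sketch(pfn_t pfn, uint_t offset)
{
	caddr_t va;
	uint32_t word;

	va = hat_kpm_mapin_pfn(pfn);
	if (va == NULL)
		return (0);		/* kpm is not enabled */
	word = *(uint32_t *)(va + offset);
	hat_kpm_mapout_pfn(pfn);
	return (word);
}
#endif	/* HAT_KPM_PFN_SKETCH */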
4291
4292 /*
4293 * Return the kpm virtual address for a specific pfn
4294 */
4295 caddr_t
4296 hat_kpm_pfn2va(pfn_t pfn)
4297 {
4298 uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4299
4300 ASSERT(!pfn_is_foreign(pfn));
4301 return ((caddr_t)vaddr);
4302 }
4303
4304 /*
4305 * Return the kpm virtual address for the page at pp.
4306 */
4307 /*ARGSUSED*/
4308 caddr_t
4309 hat_kpm_page2va(struct page *pp, int checkswap)
4310 {
4311 return (hat_kpm_pfn2va(pp->p_pagenum));
4312 }
4313
4314 /*
4315 * Return the page frame number for the kpm virtual address vaddr.
4316 */
4317 pfn_t
4318 hat_kpm_va2pfn(caddr_t vaddr)
4319 {
4320 pfn_t pfn;
4321
4322 ASSERT(IS_KPM_ADDR(vaddr));
4323
4324 pfn = (pfn_t)btop(vaddr - kpm_vbase);
4325
4326 return (pfn);
4327 }
4328
4329
4330 /*
4331 * Return the page for the kpm virtual address vaddr.
4332 */
4333 page_t *
4334 hat_kpm_vaddr2page(caddr_t vaddr)
4335 {
4336 pfn_t pfn;
4337
4338 ASSERT(IS_KPM_ADDR(vaddr));
4339
4340 pfn = hat_kpm_va2pfn(vaddr);
4341
4342 return (page_numtopp_nolock(pfn));
4343 }
4344
4345 /*
4346 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4347  * KPM page. This should never happen on x86.
4348 */
4349 int
4350 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4351 {
4352 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p",
4353 (void *)hat, (void *)vaddr);
4354
4355 return (0);
4356 }
4357
4358 /*ARGSUSED*/
4359 void
4360 hat_kpm_mseghash_clear(int nentries)
4361 {}
4362
4363 /*ARGSUSED*/
4364 void
4365 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4366 {}
4367
4368 #ifndef __xpv
4369 void
4370 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4371 offset_t kpm_pages_off)
4372 {
4373 _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4374 pfn_t base, end;
4375
4376 /*
4377 * kphysm_add_memory_dynamic() does not set nkpmpgs
4378 * when page_t memory is externally allocated. That
4379 * code must properly calculate nkpmpgs in all cases
4380 * if nkpmpgs needs to be used at some point.
4381 */
4382
4383 /*
4384 * The meta (page_t) pages for dynamically added memory are allocated
4385 * either from the incoming memory itself or from existing memory.
4386 * In the former case the base of the incoming pages will be different
4387 * than the base of the dynamic segment so call memseg_get_start() to
4388 * get the actual base of the incoming memory for each case.
4389 */
4390
4391 base = memseg_get_start(msp);
4392 end = msp->pages_end;
4393
4394 hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4395 mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4396 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4397 }
4398
4399 void
4400 hat_kpm_addmem_mseg_insert(struct memseg *msp)
4401 {
4402 _NOTE(ARGUNUSED(msp));
4403 }
4404
4405 void
4406 hat_kpm_addmem_memsegs_update(struct memseg *msp)
4407 {
4408 _NOTE(ARGUNUSED(msp));
4409 }
4410
4411 /*
4412  * Return the end of metadata for an already set up memseg.
4413 * X86 platforms don't need per-page meta data to support kpm.
4414 */
4415 caddr_t
4416 hat_kpm_mseg_reuse(struct memseg *msp)
4417 {
4418 return ((caddr_t)msp->epages);
4419 }
4420
4421 void
4422 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4423 {
4424 _NOTE(ARGUNUSED(msp, mspp));
4425 ASSERT(0);
4426 }
4427
4428 void
4429 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4430 struct memseg *lo, struct memseg *mid, struct memseg *hi)
4431 {
4432 _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4433 ASSERT(0);
4434 }
4435
4436 /*
4437 * Walk the memsegs chain, applying func to each memseg span.
4438 */
4439 void
4440 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4441 {
4442 pfn_t pbase, pend;
4443 void *base;
4444 size_t size;
4445 struct memseg *msp;
4446
4447 for (msp = memsegs; msp; msp = msp->next) {
4448 pbase = msp->pages_base;
4449 pend = msp->pages_end;
4450 base = ptob(pbase) + kpm_vbase;
4451 size = ptob(pend - pbase);
4452 func(arg, base, size);
4453 }
4454 }
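
/*
 * Illustrative sketch (not part of the original source): a hat_kpm_walk()
 * callback that totals the physical memory spanned by the memseg chain.
 * The guard macro is never defined, so this block is documentation only.
 */
#ifdef HAT_KPM_WALK_SKETCH
static void
kpm_span_total(void *arg, void *base, size_t size)
{
	_NOTE(ARGUNUSED(base));
	*(size_t *)arg += size;
}

static size_t
kpm_total_sketch(void)
{
	size_t total = 0;

	hat_kpm_walk(kpm_span_total, &total);
	return (total);
}
#endif	/* HAT_KPM_WALK_SKETCH */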
4455
4456 #else /* __xpv */
4457
4458 /*
4459 * There are specific Hypervisor calls to establish and remove mappings
4460 * to grant table references and the privcmd driver. We have to ensure
4461 * that a page table actually exists.
4462 */
4463 void
4464 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4465 {
4466 maddr_t base_ma;
4467 htable_t *ht;
4468 uint_t entry;
4469
4470 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4471 XPV_DISALLOW_MIGRATE();
4472 ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4473
4474 /*
4475 * if an address for pte_ma is passed in, return the MA of the pte
4476 * for this specific address. This address is only valid as long
4477 * as the htable stays locked.
4478 */
4479 if (pte_ma != NULL) {
4480 entry = htable_va2entry((uintptr_t)addr, ht);
4481 base_ma = pa_to_ma(ptob(ht->ht_pfn));
4482 *pte_ma = base_ma + (entry << mmu.pte_size_shift);
4483 }
4484 XPV_ALLOW_MIGRATE();
4485 }
4486
4487 void
4488 hat_release_mapping(hat_t *hat, caddr_t addr)
4489 {
4490 htable_t *ht;
4491
4492 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4493 XPV_DISALLOW_MIGRATE();
4494 ht = htable_lookup(hat, (uintptr_t)addr, 0);
4495 ASSERT(ht != NULL);
4496 ASSERT(ht->ht_busy >= 2);
4497 htable_release(ht);
4498 htable_release(ht);
4499 XPV_ALLOW_MIGRATE();
4500 }
4501 #endif /* __xpv */
4502