1 /*-
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * the Systems Programming Group of the University of Utah Computer
7 * Science Department and William Jolitz of UUNET Technologies Inc.
8 *
9 * %sccs.include.redist.c%
10 *
11 * @(#)pmap.c 8.1 (Berkeley) 06/11/93
12 */
13
14 /*
15 * Derived from hp300 version by Mike Hibler, this version by William
16 * Jolitz uses a recursive map [a pde points to the page directory] to
17 * map the page tables using the pagetables themselves. This is done to
18 * reduce the impact on kernel virtual memory for lots of sparse address
19 * space, and to reduce the cost of memory to each process.
20 *
21 * Derived from: hp300/@(#)pmap.c 7.1 (Berkeley) 12/5/90
22 */
23
24 /*
25 * Reno i386 version, from Mike Hibler's hp300 version.
26 */
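/*
 * Illustration of the recursive map described above (a sketch only, not
 * part of the pmap interface): because pm_pdir[PTDPTDI] points back at
 * the page directory itself, every PTE of the current address space
 * appears inside one 4MB window of virtual space, and the PTE for a
 * given va is reached with plain array arithmetic.  That is all the
 * vtopte()/avtopte() macros used by pmap_pte() below really do.  The
 * names recursive_pte/PTE_WINDOW are invented for this sketch.
 */
#ifdef notdef
static struct pte *
recursive_pte(va)
	vm_offset_t va;
{
	/* base of the window is the recursive slot's 4MB region */
	struct pte *PTE_WINDOW = (struct pte *)((unsigned)PTDPTDI << PD_SHIFT);

	/* one PTE per virtual page */
	return (&PTE_WINDOW[va >> PG_SHIFT]);
}
#endif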
27
28 /*
29 * Manages physical address maps.
30 *
31 * In addition to hardware address maps, this
32 * module is called upon to provide software-use-only
33 * maps which may or may not be stored in the same
34 * form as hardware maps. These pseudo-maps are
35 * used to store intermediate results from copy
36 * operations to and from address spaces.
37 *
38 * Since the information managed by this module is
39 * also stored by the logical address mapping module,
40 * this module may throw away valid virtual-to-physical
41 * mappings at almost any time. However, invalidations
42 * of virtual-to-physical mappings must be done as
43 * requested.
44 *
45 * In order to cope with hardware architectures which
46 * make virtual-to-physical map invalidates expensive,
47 * this module may delay invalidate or reduce-protection
48 * operations until they are actually
49 * necessary. This module is given full information as
50 * to which processors are currently using which maps,
51 * and to when physical maps must be made correct.
52 */
53
54 #include <sys/param.h>
55 #include <sys/proc.h>
56 #include <sys/malloc.h>
57 #include <sys/user.h>
58
59 #include <vm/vm.h>
60 #include <vm/vm_kern.h>
61 #include <vm/vm_page.h>
62
63 #ifdef NOTDEF
64 #include <vm/vm_pageout.h>
65 #include <machine/isa.h>
66 #endif
67
68 /*
69 * Allocate various and sundry SYSMAPs used in the days of old VM
70 * and not yet converted. XXX.
71 */
72 #define BSDVM_COMPAT 1
73
74 #ifdef DEBUG
75 struct {
76 int kernel; /* entering kernel mapping */
77 int user; /* entering user mapping */
78 int ptpneeded; /* needed to allocate a PT page */
79 int pwchange; /* no mapping change, just wiring or protection */
80 int wchange; /* no mapping change, just wiring */
81 int mchange; /* was mapped but mapping to different page */
82 int managed; /* a managed page */
83 int firstpv; /* first mapping for this PA */
84 int secondpv; /* second mapping for this PA */
85 int ci; /* cache inhibited */
86 int unmanaged; /* not a managed page */
87 int flushes; /* cache flushes */
88 } enter_stats;
89 struct {
90 int calls;
91 int removes;
92 int pvfirst;
93 int pvsearch;
94 int ptinvalid;
95 int uflushes;
96 int sflushes;
97 } remove_stats;
98
99 int debugmap = 0;
100 int pmapdebug = 0;
101 #define PDB_FOLLOW 0x0001
102 #define PDB_INIT 0x0002
103 #define PDB_ENTER 0x0004
104 #define PDB_REMOVE 0x0008
105 #define PDB_CREATE 0x0010
106 #define PDB_PTPAGE 0x0020
107 #define PDB_CACHE 0x0040
108 #define PDB_BITS 0x0080
109 #define PDB_COLLECT 0x0100
110 #define PDB_PROTECT 0x0200
111 #define PDB_PDRTAB 0x0400
112 #define PDB_PARANOIA 0x2000
113 #define PDB_WIRING 0x4000
114 #define PDB_PVDUMP 0x8000
115
116 int pmapvacflush = 0;
117 #define PVF_ENTER 0x01
118 #define PVF_REMOVE 0x02
119 #define PVF_PROTECT 0x04
120 #define PVF_TOTAL 0x80
121 #endif
122
123 /*
124 * Get PDEs and PTEs for user/kernel address space
125 */
126 #define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
127
128 #define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME)
129
130 #define pmap_pde_v(pte) ((pte)->pd_v)
131 #define pmap_pte_w(pte) ((pte)->pg_w)
132 /* #define pmap_pte_ci(pte) ((pte)->pg_ci) */
133 #define pmap_pte_m(pte) ((pte)->pg_m)
134 #define pmap_pte_u(pte) ((pte)->pg_u)
135 #define pmap_pte_v(pte) ((pte)->pg_v)
136 #define pmap_pte_set_w(pte, v) ((pte)->pg_w = (v))
137 #define pmap_pte_set_prot(pte, v) ((pte)->pg_prot = (v))
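/*
 * Typical use of the accessors above (an illustrative sketch; it mirrors
 * the checks pmap_remove() and pmap_protect() make below).  The helper
 * name va_to_pa_sketch is not part of the pmap interface.
 */
#ifdef notdef
static vm_offset_t
va_to_pa_sketch(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	register pt_entry_t *pte;

	/* no page table page allocated -> nothing mapped here */
	if (!pmap_pde_v(pmap_pde(pmap, va)))
		return (0);
	pte = pmap_pte(pmap, va);
	if (pte == 0 || !pmap_pte_v(pte))
		return (0);
	/* frame bits from the PTE plus the offset within the page */
	return (pmap_pte_pa(pte) | (va & ~PG_FRAME));
}
#endif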
138
139 /*
140 * Given a map and a machine independent protection code,
141 * convert to a vax protection code.
142 */
143 #define pte_prot(m, p) (protection_codes[p])
144 int protection_codes[8];
145
146 struct pmap kernel_pmap_store;
147
148 vm_offset_t avail_start; /* PA of first available physical page */
149 vm_offset_t avail_end; /* PA of last available physical page */
150 vm_size_t mem_size; /* memory size in bytes */
151 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
152 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
153 vm_offset_t vm_first_phys; /* PA of first managed page */
154 vm_offset_t vm_last_phys; /* PA just past last managed page */
155 int i386pagesperpage; /* PAGE_SIZE / I386_PAGE_SIZE */
156 boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
157 char *pmap_attributes; /* reference and modify bits */
158
159 boolean_t pmap_testbit();
160 void pmap_clear_modify();
161
162 #if BSDVM_COMPAT
163 #include <sys/msgbuf.h>
164
165 /*
166 * All those kernel PT submaps that BSD is so fond of
167 */
168 struct pte *CMAP1, *CMAP2, *mmap;
169 caddr_t CADDR1, CADDR2, vmmap;
170 struct pte *msgbufmap;
171 struct msgbuf *msgbufp;
172 #endif
173
174 void pmap_activate __P((pmap_t, struct pcb *));
175
176 /*
177 * Bootstrap the system enough to run with virtual memory.
178 * Map the kernel's code and data, and allocate the system page table.
179 *
180 * On the I386 this is called after mapping has already been enabled
181 * and just syncs the pmap module with what has already been done.
182 * [We can't call it easily with mapping off since the kernel is not
183 * mapped with PA == VA, hence we would have to relocate every address
184 * from the linked base (virtual) address 0xFE000000 to the actual
185 * (physical) address starting relative to 0]
186 */
187 struct pte *pmap_pte();
188
189 extern vm_offset_t atdevbase;
190 void
191 pmap_bootstrap(firstaddr, loadaddr)
192 vm_offset_t firstaddr;
193 vm_offset_t loadaddr;
194 {
195 #if BSDVM_COMPAT
196 vm_offset_t va;
197 struct pte *pte;
198 #endif
199 extern vm_offset_t maxmem, physmem;
200 extern int IdlePTD;
201
202
203 /* disable paging in basemem for all machines until this cryptic comment
204 * can be explained
205 */
206 #if 1 || defined(ODYSSEUS) || defined(ARGO) || defined(CIRCE)
207 firstaddr=0x100000; /* for some reason, basemem screws up on this machine */
208 #endif
209 printf("ps %x pe %x ", firstaddr, maxmem <<PG_SHIFT);
210 avail_start = firstaddr;
211 avail_end = maxmem << PG_SHIFT;
212
213 /* XXX: allow for msgbuf */
214 avail_end -= i386_round_page(sizeof(struct msgbuf));
215
216 mem_size = physmem << PG_SHIFT;
217 virtual_avail = atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
218 virtual_end = VM_MAX_KERNEL_ADDRESS;
219 i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;
220
221 /*
222 * Initialize protection array.
223 */
224 i386_protection_init();
225
226 #ifdef notdef
227 /*
228 * Create Kernel page directory table and page maps.
229 * [ currently done in locore. i have wild and crazy ideas -wfj ]
230 */
231 bzero(firstaddr, 4*NBPG);
232 kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
233 kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;
234
235 firstaddr += NBPG;
236 for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
237 x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
238 struct pde *pde;
239 pde = kernel_pmap->pm_pdir + x;
240 *(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
241 }
242 #else
243 kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
244 #endif
245
246
247 simple_lock_init(&kernel_pmap->pm_lock);
248 kernel_pmap->pm_count = 1;
249
250 #if BSDVM_COMPAT
251 /*
252 * Allocate all the submaps we need
253 */
254 #define SYSMAP(c, p, v, n) \
255 v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);
256
257 va = virtual_avail;
258 pte = pmap_pte(kernel_pmap, va);
259
260 SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
261 SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
262 SYSMAP(caddr_t ,mmap ,vmmap ,1 )
263 SYSMAP(struct msgbuf * ,msgbufmap ,msgbufp ,1 )
264 virtual_avail = va;
265 #endif
266
267 /**(int *)PTD = 0;
268 load_cr3(rcr3());*/
269
270 }
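/*
 * Sketch of how one of the BSDVM_COMPAT submaps wired up above can be
 * used to reach an arbitrary physical page from kernel virtual space:
 * point the reserved PTE (CMAP1) at the frame, flush the TLB, and the
 * frame is visible at CADDR1.  Illustration only -- the zero/copy
 * routines in this file go through clearseg()/physcopyseg() instead.
 */
#ifdef notdef
static void
zero_phys_page_sketch(pa)
	vm_offset_t pa;
{
	/* map the frame read/write at the reserved kernel VA */
	*(int *)CMAP1 = (pa & PG_FRAME) | PG_V | PG_KW;
	tlbflush();

	bzero(CADDR1, NBPG);

	/* tear the temporary mapping back down */
	*(int *)CMAP1 = 0;
	tlbflush();
}
#endif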
271
272 pmap_isvalidphys(addr) {
273 if (addr < 0xa0000) return (1);
274 if (addr >= 0x100000) return (1);
275 return(0);
276 }
277
278 /*
279 * Bootstrap memory allocator. This function allows for early dynamic
280 * memory allocation until the virtual memory system has been bootstrapped.
281 * After that point, either kmem_alloc or malloc should be used. This
282 * function works by stealing pages from the (to be) managed page pool,
283 * stealing virtual address space, then mapping the pages and zeroing them.
284 *
285 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
286 * it cannot be used, and will generate a panic if tried. Note that this
287 * memory will never be freed, and in essence it is wired down.
288 */
289 void *
290 pmap_bootstrap_alloc(size) {
291 vm_offset_t val;
292 int i;
293 extern boolean_t vm_page_startup_initialized;
294
295 if (vm_page_startup_initialized)
296 panic("pmap_bootstrap_alloc: called after startup initialized");
297 size = round_page(size);
298 val = virtual_avail;
299
300 /* deal with "hole incursion" */
301 for (i = 0; i < size; i += PAGE_SIZE) {
302
303 while (!pmap_isvalidphys(avail_start))
304 avail_start += PAGE_SIZE;
305
306 virtual_avail = pmap_map(virtual_avail, avail_start,
307 avail_start + PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
308 avail_start += PAGE_SIZE;
309 }
310
311 blkclr ((caddr_t) val, size);
312 return ((void *) val);
313 }
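/*
 * Example of the intended use (sketch): vm_page_startup() takes its
 * early, permanently wired tables from this allocator before the VM
 * system proper is running.  The helper and size below are purely
 * illustrative.
 */
#ifdef notdef
static void
bootstrap_alloc_example()
{
	struct pv_entry *early_table;

	/* wired, zeroed, and never freed */
	early_table = (struct pv_entry *)
		pmap_bootstrap_alloc(64 * sizeof(struct pv_entry));
}
#endif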
314
315 /*
316 * Initialize the pmap module.
317 * Called by vm_init, to initialize any structures that the pmap
318 * system needs to map virtual memory.
319 */
320 void
321 pmap_init(phys_start, phys_end)
322 vm_offset_t phys_start, phys_end;
323 {
324 vm_offset_t addr, addr2;
325 vm_size_t npg, s;
326 int rv;
327 extern int KPTphys;
328
329 #ifdef DEBUG
330 if (pmapdebug & PDB_FOLLOW)
331 printf("pmap_init(%x, %x)\n", phys_start, phys_end);
332 #endif
333 /*
334 * Now that kernel map has been allocated, we can mark as
335 * unavailable regions which we have mapped in locore.
336 */
337 addr = atdevbase;
338 (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
339 &addr, (0x100000-0xa0000), FALSE);
340
341 addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
342 vm_object_reference(kernel_object);
343 (void) vm_map_find(kernel_map, kernel_object, addr,
344 &addr, 2*NBPG, FALSE);
345
346 /*
347 * Allocate memory for random pmap data structures. Includes the
348 * pv_head_table and pmap_attributes.
349 */
350 npg = atop(phys_end - phys_start);
351 s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
352 s = round_page(s);
353 addr = (vm_offset_t) kmem_alloc(kernel_map, s);
354 pv_table = (pv_entry_t) addr;
355 addr += sizeof(struct pv_entry) * npg;
356 pmap_attributes = (char *) addr;
357 #ifdef DEBUG
358 if (pmapdebug & PDB_INIT)
359 printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
360 s, npg, pv_table, pmap_attributes);
361 #endif
362
363 /*
364 * Now it is safe to enable pv_table recording.
365 */
366 vm_first_phys = phys_start;
367 vm_last_phys = phys_end;
368 pmap_initialized = TRUE;
369 }
370
371 /*
372 * Used to map a range of physical addresses into kernel
373 * virtual address space.
374 *
375 * For now, VM is already on, we only need to map the
376 * specified memory.
377 */
378 vm_offset_t
379 pmap_map(virt, start, end, prot)
380 vm_offset_t virt;
381 vm_offset_t start;
382 vm_offset_t end;
383 int prot;
384 {
385 #ifdef DEBUG
386 if (pmapdebug & PDB_FOLLOW)
387 printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
388 #endif
389 while (start < end) {
390 pmap_enter(kernel_pmap, virt, start, prot, FALSE);
391 virt += PAGE_SIZE;
392 start += PAGE_SIZE;
393 }
394 return(virt);
395 }
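/*
 * Example (sketch): a driver mapping a device memory region into kernel
 * virtual space with pmap_map().  The helper name is illustrative;
 * pmap_bootstrap_alloc() above uses pmap_map() the same way, one page
 * at a time.
 */
#ifdef notdef
static vm_offset_t
map_device_example(kva, devpa, size)
	vm_offset_t kva, devpa;
	vm_size_t size;
{
	/* returns the first kernel VA past the new mappings */
	return (pmap_map(kva, devpa, devpa + size,
		VM_PROT_READ|VM_PROT_WRITE));
}
#endif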
396
397 /*
398 * Create and return a physical map.
399 *
400 * If the size specified for the map
401 * is zero, the map is an actual physical
402 * map, and may be referenced by the
403 * hardware.
404 *
405 * If the size specified is non-zero,
406 * the map will be used in software only, and
407 * is bounded by that size.
408 *
409 * [ just allocate a ptd and mark it uninitialized -- should we track
410 * with a table which process has which ptd? -wfj ]
411 */
412
413 pmap_t
414 pmap_create(size)
415 vm_size_t size;
416 {
417 register pmap_t pmap;
418
419 #ifdef DEBUG
420 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
421 printf("pmap_create(%x)\n", size);
422 #endif
423 /*
424 * Software use map does not need a pmap
425 */
426 if (size)
427 return(NULL);
428
429 /* XXX: is it ok to wait here? */
430 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
431 #ifdef notifwewait
432 if (pmap == NULL)
433 panic("pmap_create: cannot allocate a pmap");
434 #endif
435 bzero(pmap, sizeof(*pmap));
436 pmap_pinit(pmap);
437 return (pmap);
438 }
439
440 /*
441 * Initialize a preallocated and zeroed pmap structure,
442 * such as one in a vmspace structure.
443 */
444 void
445 pmap_pinit(pmap)
446 register struct pmap *pmap;
447 {
448
449 #ifdef DEBUG
450 if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
451 pg("pmap_pinit(%x)\n", pmap);
452 #endif
453
454 /*
455 * No need to allocate page table space yet but we do need a
456 * valid page directory table.
457 */
458 pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
459
460 /* wire in kernel global address entries */
461 bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
462 (KPTDI_LAST-KPTDI_FIRST+1)*4);
463
464 /* install self-referential address mapping entry */
465 *(int *)(pmap->pm_pdir+PTDPTDI) =
466 (int)pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir) | PG_V | PG_URKW;
467
468 pmap->pm_count = 1;
469 simple_lock_init(&pmap->pm_lock);
470 }
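/*
 * What the self-referential entry installed above buys us (sketch only):
 * the directory itself shows up as one more page of "PTEs" inside the
 * recursive window, so pmap_pte() below can reach any pmap's page tables
 * through PTD/APTD instead of mapping them by hand.  The helper below
 * just restates that invariant; it is not part of the interface.
 */
#ifdef notdef
static boolean_t
pmap_has_recursive_slot(pmap)
	register struct pmap *pmap;
{
	/* recursive slot valid and pointing at the directory's own frame? */
	return (pmap->pm_pdir[PTDPTDI].pd_v &&
	    i386_ptob(pmap->pm_pdir[PTDPTDI].pd_pfnum) ==
	    pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir));
}
#endif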
471
472 /*
473 * Retire the given physical map from service.
474 * Should only be called if the map contains
475 * no valid mappings.
476 */
477 void
478 pmap_destroy(pmap)
479 register pmap_t pmap;
480 {
481 int count;
482
483 #ifdef DEBUG
484 if (pmapdebug & PDB_FOLLOW)
485 printf("pmap_destroy(%x)\n", pmap);
486 #endif
487 if (pmap == NULL)
488 return;
489
490 simple_lock(&pmap->pm_lock);
491 count = --pmap->pm_count;
492 simple_unlock(&pmap->pm_lock);
493 if (count == 0) {
494 pmap_release(pmap);
495 free((caddr_t)pmap, M_VMPMAP);
496 }
497 }
498
499 /*
500 * Release any resources held by the given physical map.
501 * Called when a pmap initialized by pmap_pinit is being released.
502 * Should only be called if the map contains no valid mappings.
503 */
504 void
505 pmap_release(pmap)
506 register struct pmap *pmap;
507 {
508
509 #ifdef DEBUG
510 if (pmapdebug & PDB_FOLLOW)
511 pg("pmap_release(%x)\n", pmap);
512 #endif
513 #ifdef notdef /* DIAGNOSTIC */
514 /* count would be 0 from pmap_destroy... */
515 simple_lock(&pmap->pm_lock);
516 if (pmap->pm_count != 1)
517 panic("pmap_release count");
518 #endif
519 kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
520 }
521
522 /*
523 * Add a reference to the specified pmap.
524 */
525 void
526 pmap_reference(pmap)
527 pmap_t pmap;
528 {
529 #ifdef DEBUG
530 if (pmapdebug & PDB_FOLLOW)
531 pg("pmap_reference(%x)", pmap);
532 #endif
533 if (pmap != NULL) {
534 simple_lock(&pmap->pm_lock);
535 pmap->pm_count++;
536 simple_unlock(&pmap->pm_lock);
537 }
538 }
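/*
 * Reference counting example (sketch): the create/reference/destroy
 * routines above pair up as shown; process address spaces normally get
 * theirs through the vmspace structure via pmap_pinit()/pmap_release()
 * instead.  Helper is illustrative only.
 */
#ifdef notdef
static void
pmap_lifecycle_example()
{
	pmap_t pm;

	pm = pmap_create((vm_size_t)0);	/* size 0: a real, hardware-usable map */
	pmap_reference(pm);		/* a second user of the same map */

	pmap_destroy(pm);		/* count 2 -> 1, map stays around */
	pmap_destroy(pm);		/* last reference: pm_pdir is freed */
}
#endif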
539
540 /*
541 * Remove the given range of addresses from the specified map.
542 *
543 * It is assumed that the start and end are properly
544 * rounded to the page size.
545 */
546 void
547 pmap_remove(pmap, sva, eva)
548 register struct pmap *pmap;
549 vm_offset_t sva, eva;
550 {
551 register vm_offset_t pa, va;
552 register pt_entry_t *pte;
553 register pv_entry_t pv, npv;
554 register int ix;
555 pmap_t ptpmap;
556 int *pde, s, bits;
557 boolean_t firstpage = TRUE;
558 boolean_t flushcache = FALSE;
559 #ifdef DEBUG
560 pt_entry_t opte;
561
562 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
563 printf("pmap_remove(%x, %x, %x)", pmap, sva, eva);
564 if (eva >= USRSTACK && eva <= UPT_MAX_ADDRESS)
565 nullop();
566 #endif
567
568 if (pmap == NULL)
569 return;
570
571 #ifdef DEBUG
572 remove_stats.calls++;
573 #endif
574 for (va = sva; va < eva; va += PAGE_SIZE) {
575 /*
576 * Weed out invalid mappings.
577 * Note: we assume that the page directory table is
578 * always allocated, and in kernel virtual.
579 */
580 if (!pmap_pde_v(pmap_pde(pmap, va)))
581 continue;
582
583 pte = pmap_pte(pmap, va);
584 if (pte == 0)
585 continue;
586 pa = pmap_pte_pa(pte);
587 if (pa == 0)
588 continue;
589 #ifdef DEBUG
590 opte = *pte;
591 remove_stats.removes++;
592 #endif
593 /*
594 * Update statistics
595 */
596 if (pmap_pte_w(pte))
597 pmap->pm_stats.wired_count--;
598 pmap->pm_stats.resident_count--;
599
600 /*
601 * Invalidate the PTEs.
602 * XXX: should cluster them up and invalidate as many
603 * as possible at once.
604 */
605 #ifdef DEBUG
606 if (pmapdebug & PDB_REMOVE)
607 printf("remove: inv %x ptes at %x(%x) ",
608 i386pagesperpage, pte, *(int *)pte);
609 #endif
610 bits = ix = 0;
611 do {
612 bits |= *(int *)pte & (PG_U|PG_M);
613 *(int *)pte++ = 0;
614 /*TBIS(va + ix * I386_PAGE_SIZE);*/
615 } while (++ix != i386pagesperpage);
616 if (pmap == &curproc->p_vmspace->vm_pmap)
617 pmap_activate(pmap, (struct pcb *)curproc->p_addr);
618 /* are we current address space or kernel? */
619 /*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
620 || pmap == kernel_pmap)
621 load_cr3(curpcb->pcb_ptd);*/
622 tlbflush();
623
624 #ifdef needednotdone
625 reduce wiring count on page table pages as references drop
626 #endif
627
628 /*
629 * Remove from the PV table (raise IPL since we
630 * may be called at interrupt time).
631 */
632 if (pa < vm_first_phys || pa >= vm_last_phys)
633 continue;
634 pv = pa_to_pvh(pa);
635 s = splimp();
636 /*
637 * If it is the first entry on the list, it is actually
638 * in the header and we must copy the following entry up
639 * to the header. Otherwise we must search the list for
640 * the entry. In either case we free the now unused entry.
641 */
642 if (pmap == pv->pv_pmap && va == pv->pv_va) {
643 npv = pv->pv_next;
644 if (npv) {
645 *pv = *npv;
646 free((caddr_t)npv, M_VMPVENT);
647 } else
648 pv->pv_pmap = NULL;
649 #ifdef DEBUG
650 remove_stats.pvfirst++;
651 #endif
652 } else {
653 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
654 #ifdef DEBUG
655 remove_stats.pvsearch++;
656 #endif
657 if (pmap == npv->pv_pmap && va == npv->pv_va)
658 break;
659 pv = npv;
660 }
661 #ifdef DEBUG
662 if (npv == NULL)
663 panic("pmap_remove: PA not in pv_tab");
664 #endif
665 pv->pv_next = npv->pv_next;
666 free((caddr_t)npv, M_VMPVENT);
667 pv = pa_to_pvh(pa);
668 }
669
670 #ifdef notdef
671 [tally number of pagetable pages, if sharing of ptpages adjust here]
672 #endif
673 /*
674 * Update saved attributes for managed page
675 */
676 pmap_attributes[pa_index(pa)] |= bits;
677 splx(s);
678 }
679 #ifdef notdef
680 [cache and tlb flushing, if needed]
681 #endif
682 }
683
684 /*
685 * Routine: pmap_remove_all
686 * Function:
687 * Removes this physical page from
688 * all physical maps in which it resides.
689 * Reflects back modify bits to the pager.
690 */
691 void
692 pmap_remove_all(pa)
693 vm_offset_t pa;
694 {
695 register pv_entry_t pv;
696 int s;
697
698 #ifdef DEBUG
699 if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
700 printf("pmap_remove_all(%x)", pa);
701 /*pmap_pvdump(pa);*/
702 #endif
703 /*
704 * Not one of ours
705 */
706 if (pa < vm_first_phys || pa >= vm_last_phys)
707 return;
708
709 pv = pa_to_pvh(pa);
710 s = splimp();
711 /*
712 * Do it the easy way for now
713 */
714 while (pv->pv_pmap != NULL) {
715 #ifdef DEBUG
716 if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
717 pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
718 panic("pmap_remove_all: bad mapping");
719 #endif
720 pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
721 }
722 splx(s);
723 }
724
725 /*
726 * Routine: pmap_copy_on_write
727 * Function:
728 * Remove write privileges from all
729 * physical maps for this physical page.
730 */
731 void
732 pmap_copy_on_write(pa)
733 vm_offset_t pa;
734 {
735 #ifdef DEBUG
736 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
737 printf("pmap_copy_on_write(%x)", pa);
738 #endif
739 pmap_changebit(pa, PG_RO, TRUE);
740 }
741
742 /*
743 * Set the physical protection on the
744 * specified range of this map as requested.
745 */
746 void
747 pmap_protect(pmap, sva, eva, prot)
748 register pmap_t pmap;
749 vm_offset_t sva, eva;
750 vm_prot_t prot;
751 {
752 register pt_entry_t *pte;
753 register vm_offset_t va;
754 register int ix;
755 int i386prot;
756 boolean_t firstpage = TRUE;
757
758 #ifdef DEBUG
759 if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
760 printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
761 #endif
762 if (pmap == NULL)
763 return;
764
765 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
766 pmap_remove(pmap, sva, eva);
767 return;
768 }
769 if (prot & VM_PROT_WRITE)
770 return;
771
772 for (va = sva; va < eva; va += PAGE_SIZE) {
773 /*
774 * Page table page is not allocated.
775 * Skip it, we don't want to force allocation
776 * of unnecessary PTE pages just to set the protection.
777 */
778 if (!pmap_pde_v(pmap_pde(pmap, va))) {
779 /* XXX: avoid address wrap around */
780 if (va >= i386_trunc_pdr((vm_offset_t)-1))
781 break;
782 va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
783 continue;
784 } else pte = pmap_pte(pmap, va);
785
786 /*
787 * Page not valid. Again, skip it.
788 * Should we do this? Or set protection anyway?
789 */
790 if (!pmap_pte_v(pte))
791 continue;
792
793 ix = 0;
794 i386prot = pte_prot(pmap, prot);
795 if(va < UPT_MAX_ADDRESS)
796 i386prot |= 2 /*PG_u*/;
797 do {
798 /* clear VAC here if PG_RO? */
799 pmap_pte_set_prot(pte++, i386prot);
800 /*TBIS(va + ix * I386_PAGE_SIZE);*/
801 } while (++ix != i386pagesperpage);
802 }
803 out:
804 if (pmap == &curproc->p_vmspace->vm_pmap)
805 pmap_activate(pmap, (struct pcb *)curproc->p_addr);
806 }
807
808 /*
809 * Insert the given physical page (p) at
810 * the specified virtual address (v) in the
811 * target physical map with the protection requested.
812 *
813 * If specified, the page will be wired down, meaning
814 * that the related pte can not be reclaimed.
815 *
816 * NB: This is the only routine which MAY NOT lazy-evaluate
817 * or lose information. That is, this routine must actually
818 * insert this page into the given map NOW.
819 */
820 void
821 pmap_enter(pmap, va, pa, prot, wired)
822 register pmap_t pmap;
823 vm_offset_t va;
824 register vm_offset_t pa;
825 vm_prot_t prot;
826 boolean_t wired;
827 {
828 register pt_entry_t *pte;
829 register int npte, ix;
830 vm_offset_t opa;
831 boolean_t cacheable = TRUE;
832 boolean_t checkpv = TRUE;
833
834 #ifdef DEBUG
835 if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
836 printf("pmap_enter(%x, %x, %x, %x, %x)",
837 pmap, va, pa, prot, wired);
838 if(!pmap_isvalidphys(pa)) panic("invalid phys");
839 #endif
840 if (pmap == NULL)
841 return;
842
843 if(va > VM_MAX_KERNEL_ADDRESS)panic("pmap_enter: toobig");
844 /* also, should not muck with PTD va! */
845
846 #ifdef DEBUG
847 if (pmap == kernel_pmap)
848 enter_stats.kernel++;
849 else
850 enter_stats.user++;
851 #endif
852
853 /*
854 * Page Directory table entry not valid, we need a new PT page
855 */
856 if (!pmap_pde_v(pmap_pde(pmap, va))) {
857 pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
858 }
859
860 pte = pmap_pte(pmap, va);
861 opa = pmap_pte_pa(pte);
862 #ifdef DEBUG
863 if (pmapdebug & PDB_ENTER)
864 printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
865 #endif
866
867 /*
868 * Mapping has not changed, must be protection or wiring change.
869 */
870 if (opa == pa) {
871 #ifdef DEBUG
872 enter_stats.pwchange++;
873 #endif
874 /*
875 * Wiring change, just update stats.
876 * We don't worry about wiring PT pages as they remain
877 * resident as long as there are valid mappings in them.
878 * Hence, if a user page is wired, the PT page will be also.
879 */
880 if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
881 #ifdef DEBUG
882 if (pmapdebug & PDB_ENTER)
883 pg("enter: wiring change -> %x ", wired);
884 #endif
885 if (wired)
886 pmap->pm_stats.wired_count++;
887 else
888 pmap->pm_stats.wired_count--;
889 #ifdef DEBUG
890 enter_stats.wchange++;
891 #endif
892 }
893 goto validate;
894 }
895
896 /*
897 * Mapping has changed, invalidate old range and fall through to
898 * handle validating new mapping.
899 */
900 if (opa) {
901 #ifdef DEBUG
902 if (pmapdebug & PDB_ENTER)
903 printf("enter: removing old mapping %x pa %x ", va, opa);
904 #endif
905 pmap_remove(pmap, va, va + PAGE_SIZE);
906 #ifdef DEBUG
907 enter_stats.mchange++;
908 #endif
909 }
910
911 /*
912 * Enter on the PV list if part of our managed memory
913 * Note that we raise IPL while manipulating pv_table
914 * since pmap_enter can be called at interrupt time.
915 */
916 if (pa >= vm_first_phys && pa < vm_last_phys) {
917 register pv_entry_t pv, npv;
918 int s;
919
920 #ifdef DEBUG
921 enter_stats.managed++;
922 #endif
923 pv = pa_to_pvh(pa);
924 s = splimp();
925 #ifdef DEBUG
926 if (pmapdebug & PDB_ENTER)
927 printf("enter: pv at %x: %x/%x/%x ",
928 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
929 #endif
930 /*
931 * No entries yet, use header as the first entry
932 */
933 if (pv->pv_pmap == NULL) {
934 #ifdef DEBUG
935 enter_stats.firstpv++;
936 #endif
937 pv->pv_va = va;
938 pv->pv_pmap = pmap;
939 pv->pv_next = NULL;
940 pv->pv_flags = 0;
941 }
942 /*
943 * There is at least one other VA mapping this page.
944 * Place this entry after the header.
945 */
946 else {
947 /*printf("second time: ");*/
948 #ifdef DEBUG
949 for (npv = pv; npv; npv = npv->pv_next)
950 if (pmap == npv->pv_pmap && va == npv->pv_va)
951 panic("pmap_enter: already in pv_tab");
952 #endif
953 npv = (pv_entry_t)
954 malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
955 npv->pv_va = va;
956 npv->pv_pmap = pmap;
957 npv->pv_next = pv->pv_next;
958 pv->pv_next = npv;
959 #ifdef DEBUG
960 if (!npv->pv_next)
961 enter_stats.secondpv++;
962 #endif
963 }
964 splx(s);
965 }
966 /*
967 * Assumption: if it is not part of our managed memory
968 * then it must be device memory which may be volatile.
969 */
970 else if (pmap_initialized) {
971 checkpv = cacheable = FALSE;
972 #ifdef DEBUG
973 enter_stats.unmanaged++;
974 #endif
975 }
976
977 /*
978 * Increment counters
979 */
980 pmap->pm_stats.resident_count++;
981 if (wired)
982 pmap->pm_stats.wired_count++;
983
984 validate:
985 /*
986 * Now validate mapping with desired protection/wiring.
987 * Assume uniform modified and referenced status for all
988 * I386 pages in a MACH page.
989 */
990 npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
991 npte |= (*(int *)pte & (PG_M|PG_U));
992 if (wired)
993 npte |= PG_W;
994 if(va < UPT_MIN_ADDRESS)
995 npte |= PG_u;
996 else if(va < UPT_MAX_ADDRESS)
997 npte |= PG_u | PG_RW;
998 #ifdef DEBUG
999 if (pmapdebug & PDB_ENTER)
1000 printf("enter: new pte value %x ", npte);
1001 #endif
1002 ix = 0;
1003 do {
1004 *(int *)pte++ = npte;
1005 /*TBIS(va);*/
1006 npte += I386_PAGE_SIZE;
1007 va += I386_PAGE_SIZE;
1008 } while (++ix != i386pagesperpage);
1009 pte--;
1010 #ifdef DEBUGx
1011 cache, tlb flushes
1012 #endif
1013 /*pads(pmap);*/
1014 /*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
1015 tlbflush();
1016 }
1017
1018 /*
1019 * pmap_page_protect:
1020 *
1021 * Lower the permission for all mappings to a given page.
1022 */
1023 void
1024 pmap_page_protect(phys, prot)
1025 vm_offset_t phys;
1026 vm_prot_t prot;
1027 {
1028 switch (prot) {
1029 case VM_PROT_READ:
1030 case VM_PROT_READ|VM_PROT_EXECUTE:
1031 pmap_copy_on_write(phys);
1032 break;
1033 case VM_PROT_ALL:
1034 break;
1035 default:
1036 pmap_remove_all(phys);
1037 break;
1038 }
1039 }
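/*
 * Example (sketch) of how the VM system drives this entry point: write
 * access is revoked before a page is cleaned, and all mappings are
 * revoked before the page is reclaimed.  Helper is illustrative only.
 */
#ifdef notdef
static void
page_protect_example(pa)
	vm_offset_t pa;
{
	/* downgrade every mapping of the page to read-only */
	pmap_page_protect(pa, VM_PROT_READ);

	/* ... and later, remove every mapping outright */
	pmap_page_protect(pa, VM_PROT_NONE);
}
#endif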
1040
1041 /*
1042 * Routine: pmap_change_wiring
1043 * Function: Change the wiring attribute for a map/virtual-address
1044 * pair.
1045 * In/out conditions:
1046 * The mapping must already exist in the pmap.
1047 */
1048 void
1049 pmap_change_wiring(pmap, va, wired)
1050 register pmap_t pmap;
1051 vm_offset_t va;
1052 boolean_t wired;
1053 {
1054 register pt_entry_t *pte;
1055 register int ix;
1056
1057 #ifdef DEBUG
1058 if (pmapdebug & PDB_FOLLOW)
1059 printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
1060 #endif
1061 if (pmap == NULL)
1062 return;
1063
1064 pte = pmap_pte(pmap, va);
1065 #ifdef DEBUG
1066 /*
1067 * Page table page is not allocated.
1068 * Should this ever happen? Ignore it for now,
1069 * we don't want to force allocation of unnecessary PTE pages.
1070 */
1071 if (!pmap_pde_v(pmap_pde(pmap, va))) {
1072 if (pmapdebug & PDB_PARANOIA)
1073 pg("pmap_change_wiring: invalid PDE for %x ", va);
1074 return;
1075 }
1076 /*
1077 * Page not valid. Should this ever happen?
1078 * Just continue and change wiring anyway.
1079 */
1080 if (!pmap_pte_v(pte)) {
1081 if (pmapdebug & PDB_PARANOIA)
1082 pg("pmap_change_wiring: invalid PTE for %x ", va);
1083 }
1084 #endif
1085 if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
1086 if (wired)
1087 pmap->pm_stats.wired_count++;
1088 else
1089 pmap->pm_stats.wired_count--;
1090 }
1091 /*
1092 * Wiring is not a hardware characteristic so there is no need
1093 * to invalidate TLB.
1094 */
1095 ix = 0;
1096 do {
1097 pmap_pte_set_w(pte++, wired);
1098 } while (++ix != i386pagesperpage);
1099 }
1100
1101 /*
1102 * Routine: pmap_pte
1103 * Function:
1104 * Extract the page table entry associated
1105 * with the given map/virtual_address pair.
1106 * [ what about induced faults -wfj]
1107 */
1108
1109 struct pte *pmap_pte(pmap, va)
1110 register pmap_t pmap;
1111 vm_offset_t va;
1112 {
1113
1114 #ifdef DEBUGx
1115 if (pmapdebug & PDB_FOLLOW)
1116 printf("pmap_pte(%x, %x) ->\n", pmap, va);
1117 #endif
1118 if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
1119
1120 /* are we current address space or kernel? */
1121 if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
1122 || pmap == kernel_pmap)
1123 return ((struct pte *) vtopte(va));
1124
1125 /* otherwise, we are alternate address space */
1126 else {
1127 if (pmap->pm_pdir[PTDPTDI].pd_pfnum
1128 != APTDpde.pd_pfnum) {
1129 APTDpde = pmap->pm_pdir[PTDPTDI];
1130 tlbflush();
1131 }
1132 return((struct pte *) avtopte(va));
1133 }
1134 }
1135 return(0);
1136 }
1137
1138 /*
1139 * Routine: pmap_extract
1140 * Function:
1141 * Extract the physical page address associated
1142 * with the given map/virtual_address pair.
1143 */
1144
1145 vm_offset_t
1146 pmap_extract(pmap, va)
1147 register pmap_t pmap;
1148 vm_offset_t va;
1149 {
1150 register vm_offset_t pa;
1151
1152 #ifdef DEBUGx
1153 if (pmapdebug & PDB_FOLLOW)
1154 pg("pmap_extract(%x, %x) -> ", pmap, va);
1155 #endif
1156 pa = 0;
1157 if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
1158 pa = *(int *) pmap_pte(pmap, va);
1159 }
1160 if (pa)
1161 pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
1162 #ifdef DEBUGx
1163 if (pmapdebug & PDB_FOLLOW)
1164 printf("%x\n", pa);
1165 #endif
1166 return(pa);
1167 }
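/*
 * Example (sketch): pmap_extract() is the supported way to turn a
 * virtual address into a physical one, e.g. for a device that needs a
 * physical buffer address.  Note that the page offset is preserved.
 * Helper name is illustrative.
 */
#ifdef notdef
static vm_offset_t
kva_to_phys_example(kva)
	vm_offset_t kva;
{
	/* 0 means "no valid mapping for this address" */
	return (pmap_extract(kernel_pmap, kva));
}
#endif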
1168
1169 /*
1170 * Copy the range specified by src_addr/len
1171 * from the source map to the range dst_addr/len
1172 * in the destination map.
1173 *
1174 * This routine is only advisory and need not do anything.
1175 */
1176 void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1177 pmap_t dst_pmap;
1178 pmap_t src_pmap;
1179 vm_offset_t dst_addr;
1180 vm_size_t len;
1181 vm_offset_t src_addr;
1182 {
1183 #ifdef DEBUG
1184 if (pmapdebug & PDB_FOLLOW)
1185 printf("pmap_copy(%x, %x, %x, %x, %x)",
1186 dst_pmap, src_pmap, dst_addr, len, src_addr);
1187 #endif
1188 }
1189
1190 /*
1191 * Require that all active physical maps contain no
1192 * incorrect entries NOW. [This update includes
1193 * forcing updates of any address map caching.]
1194 *
1195 * Generally used to ensure that a thread about
1196 * to run will see a semantically correct world.
1197 */
1198 void pmap_update()
1199 {
1200 #ifdef DEBUG
1201 if (pmapdebug & PDB_FOLLOW)
1202 printf("pmap_update()");
1203 #endif
1204 tlbflush();
1205 }
1206
1207 /*
1208 * Routine: pmap_collect
1209 * Function:
1210 * Garbage collects the physical map system for
1211 * pages which are no longer used.
1212 * Success need not be guaranteed -- that is, there
1213 * may well be pages which are not referenced, but
1214 * others may be collected.
1215 * Usage:
1216 * Called by the pageout daemon when pages are scarce.
1217 * [ needs to be written -wfj ]
1218 */
1219 void
1220 pmap_collect(pmap)
1221 pmap_t pmap;
1222 {
1223 register vm_offset_t pa;
1224 register pv_entry_t pv;
1225 register int *pte;
1226 vm_offset_t kpa;
1227 int s;
1228
1229 #ifdef DEBUG
1230 int *pde;
1231 int opmapdebug;
1232 printf("pmap_collect(%x) ", pmap);
1233 #endif
1234 if (pmap != kernel_pmap)
1235 return;
1236
1237 }
1238
1239 /* [ macro again?, should I force kstack into user map here? -wfj ] */
1240 void
1241 pmap_activate(pmap, pcbp)
1242 register pmap_t pmap;
1243 struct pcb *pcbp;
1244 {
1245 int x;
1246 #ifdef DEBUG
1247 if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
1248 pg("pmap_activate(%x, %x) ", pmap, pcbp);
1249 #endif
1250 PMAP_ACTIVATE(pmap, pcbp);
1251 /*printf("pde ");
1252 for(x=0x3f6; x < 0x3fA; x++)
1253 printf("%x ", pmap->pm_pdir[x]);*/
1254 /*pads(pmap);*/
1255 /*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
1256 }
1257
1258 /*
1259 * pmap_zero_page zeros the specified (machine independent)
1260 * page by mapping the page into virtual memory and using
1261 * bzero to clear its contents, one machine dependent page
1262 * at a time.
1263 */
1264 void
1265 pmap_zero_page(phys)
1266 register vm_offset_t phys;
1267 {
1268 register int ix;
1269
1270 #ifdef DEBUG
1271 if (pmapdebug & PDB_FOLLOW)
1272 printf("pmap_zero_page(%x)", phys);
1273 #endif
1274 phys >>= PG_SHIFT;
1275 ix = 0;
1276 do {
1277 clearseg(phys++);
1278 } while (++ix != i386pagesperpage);
1279 }
1280
1281 /*
1282 * pmap_copy_page copies the specified (machine independent)
1283 * page by mapping the page into virtual memory and using
1284 * bcopy to copy the page, one machine dependent page at a
1285 * time.
1286 */
1287 void
1288 pmap_copy_page(src, dst)
1289 register vm_offset_t src, dst;
1290 {
1291 register int ix;
1292
1293 #ifdef DEBUG
1294 if (pmapdebug & PDB_FOLLOW)
1295 printf("pmap_copy_page(%x, %x)", src, dst);
1296 #endif
1297 src >>= PG_SHIFT;
1298 dst >>= PG_SHIFT;
1299 ix = 0;
1300 do {
1301 physcopyseg(src++, dst++);
1302 } while (++ix != i386pagesperpage);
1303 }
1304
1305
1306 /*
1307 * Routine: pmap_pageable
1308 * Function:
1309 * Make the specified pages (by pmap, offset)
1310 * pageable (or not) as requested.
1311 *
1312 * A page which is not pageable may not take
1313 * a fault; therefore, its page table entry
1314 * must remain valid for the duration.
1315 *
1316 * This routine is merely advisory; pmap_enter
1317 * will specify that these pages are to be wired
1318 * down (or not) as appropriate.
1319 */
1320 void
1321 pmap_pageable(pmap, sva, eva, pageable)
1322 pmap_t pmap;
1323 vm_offset_t sva, eva;
1324 boolean_t pageable;
1325 {
1326 #ifdef DEBUG
1327 if (pmapdebug & PDB_FOLLOW)
1328 printf("pmap_pageable(%x, %x, %x, %x)",
1329 pmap, sva, eva, pageable);
1330 #endif
1331 /*
1332 * If we are making a PT page pageable then all valid
1333 * mappings must be gone from that page. Hence it should
1334 * be all zeros and there is no need to clean it.
1335 * Assumptions:
1336 * - we are called with only one page at a time
1337 * - PT pages have only one pv_table entry
1338 */
1339 if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
1340 register pv_entry_t pv;
1341 register vm_offset_t pa;
1342
1343 #ifdef DEBUG
1344 if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
1345 printf("pmap_pageable(%x, %x, %x, %x)",
1346 pmap, sva, eva, pageable);
1347 #endif
1348 /*if (!pmap_pde_v(pmap_pde(pmap, sva)))
1349 return;*/
1350 if(pmap_pte(pmap, sva) == 0)
1351 return;
1352 pa = pmap_pte_pa(pmap_pte(pmap, sva));
1353 if (pa < vm_first_phys || pa >= vm_last_phys)
1354 return;
1355 pv = pa_to_pvh(pa);
1356 /*if (!ispt(pv->pv_va))
1357 return;*/
1358 #ifdef DEBUG
1359 if (pv->pv_va != sva || pv->pv_next) {
1360 pg("pmap_pageable: bad PT page va %x next %x\n",
1361 pv->pv_va, pv->pv_next);
1362 return;
1363 }
1364 #endif
1365 /*
1366 * Mark it unmodified to avoid pageout
1367 */
1368 pmap_clear_modify(pa);
1369 #ifdef needsomethinglikethis
1370 if (pmapdebug & PDB_PTPAGE)
1371 pg("pmap_pageable: PT page %x(%x) unmodified\n",
1372 sva, *(int *)pmap_pte(pmap, sva));
1373 if (pmapdebug & PDB_WIRING)
1374 pmap_check_wiring("pageable", sva);
1375 #endif
1376 }
1377 }
1378
1379 /*
1380 * Clear the modify bits on the specified physical page.
1381 */
1382
1383 void
1384 pmap_clear_modify(pa)
1385 vm_offset_t pa;
1386 {
1387 #ifdef DEBUG
1388 if (pmapdebug & PDB_FOLLOW)
1389 printf("pmap_clear_modify(%x)", pa);
1390 #endif
1391 pmap_changebit(pa, PG_M, FALSE);
1392 }
1393
1394 /*
1395 * pmap_clear_reference:
1396 *
1397 * Clear the reference bit on the specified physical page.
1398 */
1399
1400 void pmap_clear_reference(pa)
1401 vm_offset_t pa;
1402 {
1403 #ifdef DEBUG
1404 if (pmapdebug & PDB_FOLLOW)
1405 printf("pmap_clear_reference(%x)", pa);
1406 #endif
1407 pmap_changebit(pa, PG_U, FALSE);
1408 }
1409
1410 /*
1411 * pmap_is_referenced:
1412 *
1413 * Return whether or not the specified physical page is referenced
1414 * by any physical maps.
1415 */
1416
1417 boolean_t
1418 pmap_is_referenced(pa)
1419 vm_offset_t pa;
1420 {
1421 #ifdef DEBUG
1422 if (pmapdebug & PDB_FOLLOW) {
1423 boolean_t rv = pmap_testbit(pa, PG_U);
1424 printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
1425 return(rv);
1426 }
1427 #endif
1428 return(pmap_testbit(pa, PG_U));
1429 }
1430
1431 /*
1432 * pmap_is_modified:
1433 *
1434 * Return whether or not the specified physical page is modified
1435 * by any physical maps.
1436 */
1437
1438 boolean_t
1439 pmap_is_modified(pa)
1440 vm_offset_t pa;
1441 {
1442 #ifdef DEBUG
1443 if (pmapdebug & PDB_FOLLOW) {
1444 boolean_t rv = pmap_testbit(pa, PG_M);
1445 printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
1446 return(rv);
1447 }
1448 #endif
1449 return(pmap_testbit(pa, PG_M));
1450 }
1451
1452 vm_offset_t
1453 pmap_phys_address(ppn)
1454 int ppn;
1455 {
1456 return(i386_ptob(ppn));
1457 }
1458
1459 /*
1460 * Miscellaneous support routines follow
1461 */
1462
1463 i386_protection_init()
1464 {
1465 register int *kp, prot;
1466
1467 kp = protection_codes;
1468 for (prot = 0; prot < 8; prot++) {
1469 switch (prot) {
1470 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
1471 *kp++ = 0;
1472 break;
1473 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
1474 case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
1475 case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
1476 *kp++ = PG_RO;
1477 break;
1478 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
1479 case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
1480 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
1481 case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
1482 *kp++ = PG_RW;
1483 break;
1484 }
1485 }
1486 }
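/*
 * Worked example (sketch): after i386_protection_init() has filled in
 * protection_codes[], pte_prot() is a straight table lookup.  Since the
 * 386 PTE has no separate execute permission, everything collapses onto
 * invalid, PG_RO, or PG_RW.
 */
#ifdef notdef
static void
protection_example()
{
	int r, rx, rw;

	r  = pte_prot(kernel_pmap, VM_PROT_READ);			/* PG_RO */
	rx = pte_prot(kernel_pmap, VM_PROT_READ|VM_PROT_EXECUTE);	/* PG_RO */
	rw = pte_prot(kernel_pmap, VM_PROT_READ|VM_PROT_WRITE);	/* PG_RW */
}
#endif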
1487
1488 static
1489 boolean_t
1490 pmap_testbit(pa, bit)
1491 register vm_offset_t pa;
1492 int bit;
1493 {
1494 register pv_entry_t pv;
1495 register int *pte, ix;
1496 int s;
1497
1498 if (pa < vm_first_phys || pa >= vm_last_phys)
1499 return(FALSE);
1500
1501 pv = pa_to_pvh(pa);
1502 s = splimp();
1503 /*
1504 * Check saved info first
1505 */
1506 if (pmap_attributes[pa_index(pa)] & bit) {
1507 splx(s);
1508 return(TRUE);
1509 }
1510 /*
1511 * Not found, check current mappings returning
1512 * immediately if found.
1513 */
1514 if (pv->pv_pmap != NULL) {
1515 for (; pv; pv = pv->pv_next) {
1516 pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1517 ix = 0;
1518 do {
1519 if (*pte++ & bit) {
1520 splx(s);
1521 return(TRUE);
1522 }
1523 } while (++ix != i386pagesperpage);
1524 }
1525 }
1526 splx(s);
1527 return(FALSE);
1528 }
1529
1530 pmap_changebit(pa, bit, setem)
1531 register vm_offset_t pa;
1532 int bit;
1533 boolean_t setem;
1534 {
1535 register pv_entry_t pv;
1536 register int *pte, npte, ix;
1537 vm_offset_t va;
1538 int s;
1539 boolean_t firstpage = TRUE;
1540
1541 #ifdef DEBUG
1542 if (pmapdebug & PDB_BITS)
1543 printf("pmap_changebit(%x, %x, %s)",
1544 pa, bit, setem ? "set" : "clear");
1545 #endif
1546 if (pa < vm_first_phys || pa >= vm_last_phys)
1547 return;
1548
1549 pv = pa_to_pvh(pa);
1550 s = splimp();
1551 /*
1552 * Clear saved attributes (modify, reference)
1553 */
1554 if (!setem)
1555 pmap_attributes[pa_index(pa)] &= ~bit;
1556 /*
1557 * Loop over all current mappings, setting or clearing as appropriate.
1558 * If setting RO do we need to clear the VAC?
1559 */
1560 if (pv->pv_pmap != NULL) {
1561 #ifdef DEBUG
1562 int toflush = 0;
1563 #endif
1564 for (; pv; pv = pv->pv_next) {
1565 #ifdef DEBUG
1566 toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
1567 #endif
1568 va = pv->pv_va;
1569
1570 /*
1571 * XXX don't write protect pager mappings
1572 */
1573 if (bit == PG_RO) {
1574 extern vm_offset_t pager_sva, pager_eva;
1575
1576 if (va >= pager_sva && va < pager_eva)
1577 continue;
1578 }
1579
1580 pte = (int *) pmap_pte(pv->pv_pmap, va);
1581 ix = 0;
1582 do {
1583 if (setem)
1584 npte = *pte | bit;
1585 else
1586 npte = *pte & ~bit;
1587 if (*pte != npte) {
1588 *pte = npte;
1589 /*TBIS(va);*/
1590 }
1591 va += I386_PAGE_SIZE;
1592 pte++;
1593 } while (++ix != i386pagesperpage);
1594
1595 if (pv->pv_pmap == &curproc->p_vmspace->vm_pmap)
1596 pmap_activate(pv->pv_pmap, (struct pcb *)curproc->p_addr);
1597 }
1598 #ifdef somethinglikethis
1599 if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
1600 if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
1601 DCIA();
1602 else if (toflush == 2)
1603 DCIS();
1604 else
1605 DCIU();
1606 }
1607 #endif
1608 }
1609 splx(s);
1610 }
1611
1612 #ifdef DEBUG
1613 pmap_pvdump(pa)
1614 vm_offset_t pa;
1615 {
1616 register pv_entry_t pv;
1617
1618 printf("pa %x", pa);
1619 for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
1620 printf(" -> pmap %x, va %x, flags %x",
1621 pv->pv_pmap, pv->pv_va, pv->pv_flags);
1622 pads(pv->pv_pmap);
1623 }
1624 printf(" ");
1625 }
1626
1627 #ifdef notyet
1628 pmap_check_wiring(str, va)
1629 char *str;
1630 vm_offset_t va;
1631 {
1632 vm_map_entry_t entry;
1633 register int count, *pte;
1634
1635 va = trunc_page(va);
1636 if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
1637 !pmap_pte_v(pmap_pte(kernel_pmap, va)))
1638 return;
1639
1640 if (!vm_map_lookup_entry(pt_map, va, &entry)) {
1641 pg("wired_check: entry for %x not found\n", va);
1642 return;
1643 }
1644 count = 0;
1645 for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
1646 if (*pte)
1647 count++;
1648 if (entry->wired_count != count)
1649 pg("*%s*: %x: w%d/a%d\n",
1650 str, va, entry->wired_count, count);
1651 }
1652 #endif
1653
1654 /* print address space of pmap*/
1655 pads(pm) pmap_t pm; {
1656 unsigned va, i, j;
1657 struct pte *ptep;
1658
1659 if(pm == kernel_pmap) return;
1660 for (i = 0; i < 1024; i++)
1661 if(pm->pm_pdir[i].pd_v)
1662 for (j = 0; j < 1024 ; j++) {
1663 va = (i<<22)+(j<<12);
1664 if (pm == kernel_pmap && va < 0xfe000000)
1665 continue;
1666 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
1667 continue;
1668 ptep = pmap_pte(pm, va);
1669 if(pmap_pte_v(ptep))
1670 printf("%x:%x ", va, *(int *)ptep);
1671 } ;
1672
1673 }
1674 #endif
1675