1 /* $NetBSD: pmap_motorola.c,v 1.89 2024/01/19 03:35:31 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1991, 1993
34 * The Regents of the University of California. All rights reserved.
35 *
36 * This code is derived from software contributed to Berkeley by
37 * the Systems Programming Group of the University of Utah Computer
38 * Science Department.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)pmap.c 8.6 (Berkeley) 5/27/94
65 */
66
67 /*
68 * Motorola m68k-family physical map management code.
69 *
70 * Supports:
71 * 68020 with 68851 MMU
72 * 68020 with HP MMU
73 * 68030 with on-chip MMU
74 * 68040 with on-chip MMU
75 * 68060 with on-chip MMU
76 *
77 * Notes:
78 * Don't even pay lip service to multiprocessor support.
79 *
80 * We assume TLB entries don't have process tags (except for the
81 * supervisor/user distinction) so we only invalidate TLB entries
82 * when changing mappings for the current (or kernel) pmap. This is
83 * technically not true for the 68851 but we flush the TLB on every
84 * context switch, so it effectively winds up that way.
85 *
86 * Bitwise and/or operations are significantly faster than bitfield
87 * references so we use them when accessing STE/PTEs in the pmap_pte_*
88 * macros. Note also that the two are not always equivalent; e.g.:
89 * (*pte & PG_PROT) [4] != pte->pg_prot [1]
90 * and a couple of routines that deal with protection and wiring take
91 * some shortcuts that assume the and/or definitions.
92 */
93
94 /*
95 * Manages physical address maps.
96 *
97 * In addition to hardware address maps, this
98 * module is called upon to provide software-use-only
99 * maps which may or may not be stored in the same
100 * form as hardware maps. These pseudo-maps are
101 * used to store intermediate results from copy
102 * operations to and from address spaces.
103 *
104 * Since the information managed by this module is
105 * also stored by the logical address mapping module,
106 * this module may throw away valid virtual-to-physical
107 * mappings at almost any time. However, invalidations
108 * of virtual-to-physical mappings must be done as
109 * requested.
110 *
111 * In order to cope with hardware architectures which
112 * make virtual-to-physical map invalidates expensive,
113 * this module may delay invalidation or protection-reduction
114 * operations until such time as they are actually
115 * necessary. This module is given full information as
116 * to which processors are currently using which maps,
117 * and to when physical maps must be made correct.
118 */
119
120 #include "opt_m68k_arch.h"
121
122 #include <sys/cdefs.h>
123 __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.89 2024/01/19 03:35:31 thorpej Exp $");
124
125 #include <sys/param.h>
126 #include <sys/systm.h>
127 #include <sys/proc.h>
128 #include <sys/pool.h>
129 #include <sys/cpu.h>
130 #include <sys/atomic.h>
131
132 #include <machine/pte.h>
133 #include <machine/pcb.h>
134
135 #include <uvm/uvm.h>
136 #include <uvm/uvm_physseg.h>
137
138 #include <m68k/cacheops.h>
139
140 #if !defined(M68K_MMU_MOTOROLA) && !defined(M68K_MMU_HP)
141 #error Hit the road, Jack...
142 #endif
143
144 #ifdef DEBUG
145 #define PDB_FOLLOW 0x0001
146 #define PDB_INIT 0x0002
147 #define PDB_ENTER 0x0004
148 #define PDB_REMOVE 0x0008
149 #define PDB_CREATE 0x0010
150 #define PDB_PTPAGE 0x0020
151 #define PDB_CACHE 0x0040
152 #define PDB_BITS 0x0080
153 #define PDB_COLLECT 0x0100
154 #define PDB_PROTECT 0x0200
155 #define PDB_SEGTAB 0x0400
156 #define PDB_MULTIMAP 0x0800
157 #define PDB_PARANOIA 0x2000
158 #define PDB_WIRING 0x4000
159 #define PDB_PVDUMP 0x8000
160
161 int debugmap = 0;
162 int pmapdebug = PDB_PARANOIA;
163
164 #define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
165 #else /* ! DEBUG */
166 #define PMAP_DPRINTF(l, x) /* nothing */
167 #endif /* DEBUG */
168
169 /*
170 * Get STEs and PTEs for user/kernel address space
171 */
172 #if defined(M68040) || defined(M68060)
173 #define pmap_ste1(m, v) \
174 (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
175 /* XXX assumes physically contiguous ST pages (if more than one) */
176 #define pmap_ste2(m, v) \
177 (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
178 - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
179 #if defined(M68020) || defined(M68030)
180 #define pmap_ste(m, v) \
181 (&((m)->pm_stab[(vaddr_t)(v) \
182 >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
183 #define pmap_ste_v(m, v) \
184 (mmutype == MMU_68040 \
185 ? ((*pmap_ste1(m, v) & SG_V) && \
186 (*pmap_ste2(m, v) & SG_V)) \
187 : (*pmap_ste(m, v) & SG_V))
188 #else
189 #define pmap_ste(m, v) \
190 (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
191 #define pmap_ste_v(m, v) \
192 ((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
193 #endif
194 #else
195 #define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
196 #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
197 #endif
198
199 #define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
200 #define pmap_pte_pa(pte) (*(pte) & PG_FRAME)
201 #define pmap_pte_w(pte) (*(pte) & PG_W)
202 #define pmap_pte_ci(pte) (*(pte) & PG_CI)
203 #define pmap_pte_m(pte) (*(pte) & PG_M)
204 #define pmap_pte_u(pte) (*(pte) & PG_U)
205 #define pmap_pte_prot(pte) (*(pte) & PG_PROT)
206 #define pmap_pte_v(pte) (*(pte) & PG_V)
207
208 #define pmap_pte_set_w(pte, v) \
209 if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
210 #define pmap_pte_set_prot(pte, v) \
211 if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
212 #define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
213 #define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
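/*
 * Illustrative sketch (not part of the original code): a typical lookup
 * using the accessors above mirrors what pmap_extract() does further down,
 * assuming a pmap, a va, and a paddr_t pa to fill in:
 *
 *	if (pmap_ste_v(pmap, va)) {
 *		pt_entry_t *pte = pmap_pte(pmap, va);
 *		if (pmap_pte_v(pte))
 *			pa = pmap_pte_pa(pte) | (va & ~PG_FRAME);
 *	}
 */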
214
215 /*
216 * Given a map and a machine independent protection code,
217 * convert to an m68k protection code.
218 */
219 #define pte_prot(m, p) (protection_codes[p])
220 u_int protection_codes[8];
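/*
 * For example, once pmap_bootstrap_finalize() has filled in the table,
 * pte_prot(pmap_kernel(), VM_PROT_READ) yields PG_RO and
 * pte_prot(pmap_kernel(), VM_PROT_READ|VM_PROT_WRITE) yields PG_RW;
 * the pmap argument is ignored by the macro.
 */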
221
222 /*
223 * Kernel page table page management.
224 */
225 struct kpt_page {
226 struct kpt_page *kpt_next; /* link on either used or free list */
227 vaddr_t kpt_va; /* always valid kernel VA */
228 paddr_t kpt_pa; /* PA of this page (for speed) */
229 };
230 struct kpt_page *kpt_free_list, *kpt_used_list;
231 struct kpt_page *kpt_pages;
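/*
 * pmap_init() carves these pages out of the kernel map and strings them
 * onto kpt_free_list; pmap_enter_ptpage() takes one off that list when a
 * new kernel PT page is needed, and pmap_collect1() eventually returns
 * empty ones from kpt_used_list to the free list.
 */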
232
233 /*
234 * Kernel segment/page table and page table map.
235 * The page table map gives us a level of indirection we need to dynamically
236 * expand the page table. It is essentially a copy of the segment table
237 * with PTEs instead of STEs. All are initialized in locore at boot time.
238 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
239 * Segtabzero is an empty segment table which all processes share until they
240 * reference something.
241 */
242 paddr_t Sysseg_pa;
243 st_entry_t *Sysseg;
244 pt_entry_t *Sysmap, *Sysptmap;
245 st_entry_t *Segtabzero, *Segtabzeropa;
246 vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
247
248 static struct pmap kernel_pmap_store;
249 struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
250 struct vm_map *st_map, *pt_map;
251 struct vm_map st_map_store, pt_map_store;
252
253 vaddr_t lwp0uarea; /* lwp0 u-area VA, initialized in bootstrap */
254
255 paddr_t avail_start; /* PA of first available physical page */
256 paddr_t avail_end; /* PA of last available physical page */
257 vsize_t mem_size; /* memory size in bytes */
258 vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/
259 vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */
260 int page_cnt; /* number of pages managed by VM system */
261
262 bool pmap_initialized = false; /* Has pmap_init completed? */
263
264 vaddr_t m68k_uptbase = M68K_PTBASE;
265
266 struct pv_header {
267 struct pv_entry pvh_first; /* first PV entry */
268 uint16_t pvh_attrs; /* attributes:
269 bits 0-7: PTE bits
270 bits 8-15: flags */
271 uint16_t pvh_cimappings; /* # caller-specified CI
272 mappings */
273 };
274
275 #define PVH_CI 0x10 /* all entries are cache-inhibited */
276 #define PVH_PTPAGE 0x20 /* entry maps a page table page */
277
278 struct pv_header *pv_table;
279 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
280 int pv_nfree;
281
282 #ifdef CACHE_HAVE_VAC
283 u_int pmap_aliasmask; /* separation at which VA aliasing ok */
284 #endif
285 #if defined(M68040) || defined(M68060)
286 u_int protostfree; /* prototype (default) free ST map */
287 #endif
288
289 pt_entry_t *caddr1_pte; /* PTE for CADDR1 */
290 pt_entry_t *caddr2_pte; /* PTE for CADDR2 */
291
292 struct pool pmap_pmap_pool; /* memory pool for pmap structures */
293 struct pool pmap_pv_pool; /* memory pool for pv entries */
294
295 #define pmap_alloc_pv() pool_get(&pmap_pv_pool, PR_NOWAIT)
296 #define pmap_free_pv(pv) pool_put(&pmap_pv_pool, (pv))
297
298 #define PAGE_IS_MANAGED(pa) (pmap_initialized && uvm_pageismanaged(pa))
299
300 static inline struct pv_header *
pa_to_pvh(paddr_t pa)
302 {
303 uvm_physseg_t bank = 0; /* XXX gcc4 -Wuninitialized */
304 psize_t pg = 0;
305
306 bank = uvm_physseg_find(atop((pa)), &pg);
307 return &uvm_physseg_get_pmseg(bank)->pvheader[pg];
308 }
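/*
 * Illustrative sketch (not part of the original code): the per-page PV
 * list is walked the way pmap_page_protect() and pmap_collect1() do below:
 *
 *	struct pv_header *pvh = pa_to_pvh(pa);
 *	struct pv_entry *pv;
 *
 *	for (pv = &pvh->pvh_first;
 *	    pv != NULL && pv->pv_pmap != NULL; pv = pv->pv_next) {
 *		... examine pv->pv_pmap / pv->pv_va ...
 *	}
 */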
309
310 /*
311 * Internal routines
312 */
313 void pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int,
314 struct pv_entry **);
315 bool pmap_testbit(paddr_t, int);
316 bool pmap_changebit(paddr_t, pt_entry_t, pt_entry_t);
317 int pmap_enter_ptpage(pmap_t, vaddr_t, bool);
318 void pmap_ptpage_addref(vaddr_t);
319 int pmap_ptpage_delref(vaddr_t);
320 void pmap_pinit(pmap_t);
321 void pmap_release(pmap_t);
322
323 #ifdef DEBUG
324 void pmap_pvdump(paddr_t);
325 void pmap_check_wiring(const char *, vaddr_t);
326 #endif
327
328 /* pmap_remove_mapping flags */
329 #define PRM_TFLUSH 0x01
330 #define PRM_CFLUSH 0x02
331 #define PRM_KEEPPTPAGE 0x04
332
333 #define active_pmap(pm) \
334 ((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
335
336 #define active_user_pmap(pm) \
337 (curproc && \
338 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
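/*
 * Per the TLB note at the top of this file, TLB (and VAC) flushes are only
 * needed when the mapping being changed belongs to the pmap the CPU is
 * currently using; these predicates are what the TBIS()/DCIU() calls
 * below key off.
 */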
339
340 static void (*pmap_load_urp_func)(paddr_t);
341
342 /*
343 * pmap_load_urp:
344 *
345 * Load the user root table into the MMU.
346 */
347 static inline void
pmap_load_urp(paddr_t urp)
349 {
350 (*pmap_load_urp_func)(urp);
351 }
352
353 /*
354 * pmap_bootstrap_finalize: [ INTERFACE ]
355 *
356 * Initialize lwp0 uarea, curlwp, and curpcb after MMU is turned on,
357 * using lwp0uarea variable saved during pmap_bootstrap().
358 */
359 void
pmap_bootstrap_finalize(void)
361 {
362
363 #if !defined(amiga) && !defined(atari)
364 /*
365 * XXX
366 * amiga and atari have different pmap initialization functions
367 * and they require this earlier.
368 */
369 uvmexp.pagesize = NBPG;
370 uvm_md_init();
371 #endif
372
373 /*
374 * Initialize protection array.
375 * XXX: Could this have port specific values? Can't this be static?
376 */
377 protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
378 protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
379 protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
380 protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
381 protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
382 protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
383 protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
384 protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
385
386 /*
387 * Initialize pmap_kernel().
388 */
389 pmap_kernel()->pm_stpa = (st_entry_t *)Sysseg_pa;
390 pmap_kernel()->pm_stab = Sysseg;
391 pmap_kernel()->pm_ptab = Sysmap;
392 #if defined(M68040) || defined(M68060)
393 if (mmutype == MMU_68040)
394 pmap_kernel()->pm_stfree = protostfree;
395 #endif
396 pmap_kernel()->pm_count = 1;
397
398 /*
399 * Initialize lwp0 uarea, curlwp, and curpcb.
400 */
401 memset((void *)lwp0uarea, 0, USPACE);
402 uvm_lwp_setuarea(&lwp0, lwp0uarea);
403 curlwp = &lwp0;
404 curpcb = lwp_getpcb(&lwp0);
405 }
406
407 /*
408 * pmap_virtual_space: [ INTERFACE ]
409 *
410 * Report the range of available kernel virtual address
411 * space to the VM system during bootstrap.
412 *
413 * This is only an interface function if we do not use
414 * pmap_steal_memory()!
415 *
416 * Note: no locking is necessary in this function.
417 */
418 void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
420 {
421
422 *vstartp = virtual_avail;
423 *vendp = virtual_end;
424 }
425
426 /*
427 * pmap_init: [ INTERFACE ]
428 *
429 * Initialize the pmap module. Called by vm_init(), to initialize any
430 * structures that the pmap system needs to map virtual memory.
431 *
432 * Note: no locking is necessary in this function.
433 */
434 void
pmap_init(void)
436 {
437 vaddr_t addr, addr2;
438 vsize_t s;
439 struct pv_header *pvh;
440 int rv;
441 int npages;
442 uvm_physseg_t bank;
443
444 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
445
446 /*
447 * Before we do anything else, initialize the PTE pointers
448 * used by pmap_zero_page() and pmap_copy_page().
449 */
450 caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
451 caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
452
453 PMAP_DPRINTF(PDB_INIT,
454 ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
455 Sysseg, Sysmap, Sysptmap));
456 PMAP_DPRINTF(PDB_INIT,
457 (" pstart %lx, pend %lx, vstart %lx, vend %lx\n",
458 avail_start, avail_end, virtual_avail, virtual_end));
459
460 /*
461 * Allocate memory for random pmap data structures. Includes the
462 * initial segment table, pv_head_table and pmap_attributes.
463 */
464 for (page_cnt = 0, bank = uvm_physseg_get_first();
465 uvm_physseg_valid_p(bank);
466 bank = uvm_physseg_get_next(bank))
467 page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
468 s = M68K_STSIZE; /* Segtabzero */
469 s += page_cnt * sizeof(struct pv_header); /* pv table */
470 s = round_page(s);
471 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
472 if (addr == 0)
473 panic("pmap_init: can't allocate data structures");
474
475 Segtabzero = (st_entry_t *)addr;
476 (void)pmap_extract(pmap_kernel(), addr,
477 (paddr_t *)(void *)&Segtabzeropa);
478 addr += M68K_STSIZE;
479
480 pv_table = (struct pv_header *) addr;
481 addr += page_cnt * sizeof(struct pv_header);
482
483 PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
484 "tbl %p\n",
485 s, page_cnt, Segtabzero, Segtabzeropa,
486 pv_table));
487
488 /*
489 * Now that the pv and attribute tables have been allocated,
490 * assign them to the memory segments.
491 */
492 pvh = pv_table;
493 for (bank = uvm_physseg_get_first();
494 uvm_physseg_valid_p(bank);
495 bank = uvm_physseg_get_next(bank)) {
496 npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
497 uvm_physseg_get_pmseg(bank)->pvheader = pvh;
498 pvh += npages;
499 }
500
501 /*
502 * Allocate physical memory for kernel PT pages and their management.
503 * We need 1 PT page per possible task plus some slop.
504 */
505 npages = uimin(atop(M68K_MAX_KPTSIZE), maxproc+16);
506 s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
507
508 /*
509 * Verify that space will be allocated in region for which
510 * we already have kernel PT pages.
511 */
512 addr = 0;
513 rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
514 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
515 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
516 if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
517 panic("pmap_init: kernel PT too small");
518 uvm_unmap(kernel_map, addr, addr + s);
519
520 /*
521 * Now allocate the space and link the pages together to
522 * form the KPT free list.
523 */
524 addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
525 if (addr == 0)
526 panic("pmap_init: cannot allocate KPT free list");
527 s = ptoa(npages);
528 addr2 = addr + s;
529 kpt_pages = &((struct kpt_page *)addr2)[npages];
530 kpt_free_list = NULL;
531 do {
532 addr2 -= PAGE_SIZE;
533 (--kpt_pages)->kpt_next = kpt_free_list;
534 kpt_free_list = kpt_pages;
535 kpt_pages->kpt_va = addr2;
536 (void) pmap_extract(pmap_kernel(), addr2,
537 (paddr_t *)&kpt_pages->kpt_pa);
538 } while (addr != addr2);
539
540 PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
541 atop(s), addr, addr + s));
542
543 /*
544 * Allocate the segment table map and the page table map.
545 */
546 s = maxproc * M68K_STSIZE;
547 st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false,
548 &st_map_store);
549
550 addr = m68k_uptbase;
551 if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
552 s = M68K_PTMAXSIZE;
553 /*
554 * XXX We don't want to hang when we run out of
555 * page tables, so we lower maxproc so that fork()
556 * will fail instead. Note that root could still raise
557 * this value via sysctl(3).
558 */
559 maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
560 } else
561 s = (maxproc * M68K_MAX_PTSIZE);
562 pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
563 true, &pt_map_store);
564
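	/*
	 * Build the prototype segment-table-free bitmap handed out to new
	 * pmaps on the 68040/060: every level-2 table chunk starts out
	 * free except chunk 0, and the chunks beyond MAXUL2SIZE (which
	 * don't exist) are marked in use.
	 */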
565 #if defined(M68040) || defined(M68060)
566 if (mmutype == MMU_68040) {
567 protostfree = ~l2tobm(0);
568 for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
569 protostfree &= ~l2tobm(rv);
570 }
571 #endif
572
573 /*
574 * Initialize the pmap pools.
575 */
576 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
577 &pool_allocator_nointr, IPL_NONE);
578
579 /*
580 * Initialize the pv_entry pools.
581 */
582 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
583 &pool_allocator_meta, IPL_NONE);
584
585 /*
586 * Now that this is done, mark the pages shared with the
587 * hardware page table search as non-CCB (actually, as CI).
588 *
589 * XXX Hm. Given that this is in the kernel map, can't we just
590 * use the va's?
591 */
592 #ifdef M68060
593 #if defined(M68020) || defined(M68030) || defined(M68040)
594 if (cputype == CPU_68060)
595 #endif
596 {
597 struct kpt_page *kptp = kpt_free_list;
598 paddr_t paddr;
599
600 while (kptp) {
601 pmap_changebit(kptp->kpt_pa, PG_CI,
602 (pt_entry_t)~PG_CCB);
603 kptp = kptp->kpt_next;
604 }
605
606 paddr = (paddr_t)Segtabzeropa;
607 while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
608 pmap_changebit(paddr, PG_CI,
609 (pt_entry_t)~PG_CCB);
610 paddr += PAGE_SIZE;
611 }
612
613 DCIS();
614 }
615 #endif
616
617 /*
618 * Set up the routine that loads the MMU root table pointer.
619 */
620 switch (cputype) {
621 #if defined(M68020)
622 case CPU_68020:
623 #ifdef M68K_MMU_MOTOROLA
624 if (mmutype == MMU_68851) {
625 protorp[0] = MMU51_CRP_BITS;
626 pmap_load_urp_func = mmu_load_urp51;
627 }
628 #endif
629 #ifdef M68K_MMU_HP
630 if (mmutype == MMU_HP) {
631 pmap_load_urp_func = mmu_load_urp20hp;
632 }
633 #endif
634 break;
635 #endif /* M68020 */
636 #if defined(M68030)
637 case CPU_68030:
638 protorp[0] = MMU51_CRP_BITS;
639 pmap_load_urp_func = mmu_load_urp51;
640 break;
641 #endif /* M68030 */
642 #if defined(M68040)
643 case CPU_68040:
644 pmap_load_urp_func = mmu_load_urp40;
645 break;
646 #endif /* M68040 */
647 #if defined(M68060)
648 case CPU_68060:
649 pmap_load_urp_func = mmu_load_urp60;
650 break;
651 #endif /* M68060 */
652 default:
653 break;
654 }
655 if (pmap_load_urp_func == NULL) {
656 panic("pmap_init: No mmu_load_*() for cpu=%d mmu=%d",
657 cputype, mmutype);
658 }
659
660 /*
661 * Now it is safe to enable pv_table recording.
662 */
663 pmap_initialized = true;
664 }
665
666 /*
667 * pmap_map:
668 *
669 * Used to map a range of physical addresses into kernel
670 * virtual address space.
671 *
672 * For now, VM is already on, we only need to map the
673 * specified memory.
674 *
675 * Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
676 */
677 vaddr_t
pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, int prot)
679 {
680
681 PMAP_DPRINTF(PDB_FOLLOW,
682 ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));
683
684 while (spa < epa) {
685 pmap_enter(pmap_kernel(), va, spa, prot, 0);
686 va += PAGE_SIZE;
687 spa += PAGE_SIZE;
688 }
689 pmap_update(pmap_kernel());
690 return va;
691 }
692
693 /*
694 * pmap_create: [ INTERFACE ]
695 *
696 * Create and return a physical map.
697 *
698 * Note: no locking is necessary in this function.
699 */
700 pmap_t
pmap_create(void)
702 {
703 struct pmap *pmap;
704
705 PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
706 ("pmap_create()\n"));
707
708 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
709 memset(pmap, 0, sizeof(*pmap));
710 pmap_pinit(pmap);
711 return pmap;
712 }
713
714 /*
715 * pmap_pinit:
716 *
717 * Initialize a preallocated and zeroed pmap structure.
718 *
719 * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
720 */
721 void
pmap_pinit(struct pmap *pmap)
723 {
724
725 PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
726 ("pmap_pinit(%p)\n", pmap));
727
728 /*
729 * No need to allocate page table space yet but we do need a
730 * valid segment table. Initially, we point everyone at the
731 * "null" segment table. On the first pmap_enter, a real
732 * segment table will be allocated.
733 */
734 pmap->pm_stab = Segtabzero;
735 pmap->pm_stpa = Segtabzeropa;
736 #if defined(M68040) || defined(M68060)
737 #if defined(M68020) || defined(M68030)
738 if (mmutype == MMU_68040)
739 #endif
740 pmap->pm_stfree = protostfree;
741 #endif
742 pmap->pm_count = 1;
743 }
744
745 /*
746 * pmap_destroy: [ INTERFACE ]
747 *
748 * Drop the reference count on the specified pmap, releasing
749 * all resources if the reference count drops to zero.
750 */
751 void
pmap_destroy(pmap_t pmap)
753 {
754 int count;
755
756 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
757
758 count = atomic_dec_uint_nv(&pmap->pm_count);
759 if (count == 0) {
760 pmap_release(pmap);
761 pool_put(&pmap_pmap_pool, pmap);
762 }
763 }
764
765 /*
766 * pmap_release:
767 *
768 * Release the resources held by a pmap.
769 *
770 * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
771 */
772 void
pmap_release(pmap_t pmap)
774 {
775
776 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
777
778 #ifdef notdef /* DIAGNOSTIC */
779 /* count would be 0 from pmap_destroy... */
780 if (pmap->pm_count != 1)
781 panic("pmap_release count");
782 #endif
783
784 if (pmap->pm_ptab) {
785 pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
786 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
787 uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
788 (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
789 uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
790 M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
791 }
792 KASSERT(pmap->pm_stab == Segtabzero);
793 }
794
795 /*
796 * pmap_reference: [ INTERFACE ]
797 *
798 * Add a reference to the specified pmap.
799 */
800 void
pmap_reference(pmap_t pmap)
802 {
803 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
804
805 atomic_inc_uint(&pmap->pm_count);
806 }
807
808 /*
809 * pmap_activate: [ INTERFACE ]
810 *
811 * Activate the pmap used by the specified process. This includes
812 * reloading the MMU context if this is the current process, and marking
813 * the pmap in use by the processor.
814 *
815 * Note: we may only use spin locks here, since we are called
816 * by a critical section in cpu_switch()!
817 */
818 void
pmap_activate(struct lwp *l)
820 {
821 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
822
823 PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
824 ("pmap_activate(%p)\n", l));
825
826 KASSERT(l == curlwp);
827
828 /*
829 * Because the kernel has a separate root pointer, we don't
830 * need to activate the kernel pmap.
831 */
832 if (pmap != pmap_kernel()) {
833 pmap_load_urp((paddr_t)pmap->pm_stpa);
834 }
835 }
836
837 /*
838 * pmap_deactivate: [ INTERFACE ]
839 *
840 * Mark that the pmap used by the specified process is no longer
841 * in use by the processor.
842 *
843 * The comment above pmap_activate() wrt. locking applies here,
844 * as well.
845 */
846 void
pmap_deactivate(struct lwp *l)
848 {
849
850 /* No action necessary in this pmap implementation. */
851 }
852
853 /*
854 * pmap_remove: [ INTERFACE ]
855 *
856 * Remove the given range of addresses from the specified map.
857 *
858 * It is assumed that the start and end are properly
859 * rounded to the page size.
860 */
861 void
pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
863 {
864 vaddr_t nssva;
865 pt_entry_t *pte;
866 int flags;
867 #ifdef CACHE_HAVE_VAC
868 bool firstpage = true, needcflush = false;
869 #endif
870
871 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
872 ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
873
874 flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
875 while (sva < eva) {
876 nssva = m68k_trunc_seg(sva) + NBSEG;
877 if (nssva == 0 || nssva > eva)
878 nssva = eva;
879
880 /*
881 * Invalidate every valid mapping within this segment.
882 */
883
884 pte = pmap_pte(pmap, sva);
885 while (sva < nssva) {
886
887 /*
888 * If this segment is unallocated,
889 * skip to the next segment boundary.
890 */
891
892 if (!pmap_ste_v(pmap, sva)) {
893 sva = nssva;
894 break;
895 }
896
897 if (pmap_pte_v(pte)) {
898 #ifdef CACHE_HAVE_VAC
899 if (pmap_aliasmask) {
900
901 /*
902 * Purge kernel side of VAC to ensure
903 * we get the correct state of any
904 * hardware maintained bits.
905 */
906
907 if (firstpage) {
908 DCIS();
909 }
910
911 /*
912 * Remember if we may need to
913 * flush the VAC due to a non-CI
914 * mapping.
915 */
916
917 if (!needcflush && !pmap_pte_ci(pte))
918 needcflush = true;
919
920 }
921 firstpage = false;
922 #endif
923 pmap_remove_mapping(pmap, sva, pte, flags, NULL);
924 }
925 pte++;
926 sva += PAGE_SIZE;
927 }
928 }
929
930 #ifdef CACHE_HAVE_VAC
931
932 /*
933 * Didn't do anything, no need for cache flushes
934 */
935
936 if (firstpage)
937 return;
938
939 /*
940 * In a couple of cases, we don't need to worry about flushing
941 * the VAC:
942 * 1. if this is a kernel mapping,
943 * we have already done it
944 * 2. if it is a user mapping not for the current process,
945 * it won't be there
946 */
947
948 if (pmap_aliasmask && !active_user_pmap(pmap))
949 needcflush = false;
950 if (needcflush) {
951 if (pmap == pmap_kernel()) {
952 DCIS();
953 } else {
954 DCIU();
955 }
956 }
957 #endif
958 }
959
960 /*
961 * pmap_page_protect: [ INTERFACE ]
962 *
963 * Lower the permission for all mappings to a given page to
964 * the permissions specified.
965 */
966 void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
968 {
969 paddr_t pa = VM_PAGE_TO_PHYS(pg);
970 struct pv_header *pvh;
971 struct pv_entry *pv;
972 pt_entry_t *pte;
973 int s;
974
975 #ifdef DEBUG
976 if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
977 (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
978 printf("pmap_page_protect(%p, %x)\n", pg, prot);
979 #endif
980
981 switch (prot) {
982 case VM_PROT_READ|VM_PROT_WRITE:
983 case VM_PROT_ALL:
984 return;
985
986 /* copy_on_write */
987 case VM_PROT_READ:
988 case VM_PROT_READ|VM_PROT_EXECUTE:
989 pmap_changebit(pa, PG_RO, ~0);
990 return;
991
992 /* remove_all */
993 default:
994 break;
995 }
996
997 pvh = pa_to_pvh(pa);
998 pv = &pvh->pvh_first;
999 s = splvm();
1000 while (pv->pv_pmap != NULL) {
1001
1002 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
1003 #ifdef DEBUG
1004 if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
1005 pmap_pte_pa(pte) != pa)
1006 panic("pmap_page_protect: bad mapping");
1007 #endif
1008 pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
1009 pte, PRM_TFLUSH|PRM_CFLUSH, NULL);
1010 }
1011 splx(s);
1012 }
1013
1014 /*
1015 * pmap_protect: [ INTERFACE ]
1016 *
1017 * Set the physical protection on the specified range of this map
1018 * as requested.
1019 */
1020 void
pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1022 {
1023 vaddr_t nssva;
1024 pt_entry_t *pte;
1025 bool firstpage __unused, needtflush;
1026 int isro;
1027
1028 PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
1029 ("pmap_protect(%p, %lx, %lx, %x)\n",
1030 pmap, sva, eva, prot));
1031
1032 #ifdef PMAPSTATS
1033 protect_stats.calls++;
1034 #endif
1035 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1036 pmap_remove(pmap, sva, eva);
1037 return;
1038 }
1039 isro = pte_prot(pmap, prot);
1040 needtflush = active_pmap(pmap);
1041 firstpage = true;
1042 while (sva < eva) {
1043 nssva = m68k_trunc_seg(sva) + NBSEG;
1044 if (nssva == 0 || nssva > eva)
1045 nssva = eva;
1046
1047 /*
1048 * If VA belongs to an unallocated segment,
1049 * skip to the next segment boundary.
1050 */
1051
1052 if (!pmap_ste_v(pmap, sva)) {
1053 sva = nssva;
1054 continue;
1055 }
1056
1057 /*
1058 * Change protection on mapping if it is valid and doesn't
1059 * already have the correct protection.
1060 */
1061
1062 pte = pmap_pte(pmap, sva);
1063 while (sva < nssva) {
1064 if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
1065 #ifdef CACHE_HAVE_VAC
1066
1067 /*
1068 * Purge kernel side of VAC to ensure we
1069 * get the correct state of any hardware
1070 * maintained bits.
1071 *
1072 * XXX do we need to clear the VAC in
1073 * general to reflect the new protection?
1074 */
1075
1076 if (firstpage && pmap_aliasmask)
1077 DCIS();
1078 #endif
1079
1080 #if defined(M68040) || defined(M68060)
1081
1082 /*
1083 * Clear caches if making RO (see section
1084 * "7.3 Cache Coherency" in the manual).
1085 */
1086
1087 #if defined(M68020) || defined(M68030)
1088 if (isro && mmutype == MMU_68040)
1089 #else
1090 if (isro)
1091 #endif
1092 {
1093 paddr_t pa = pmap_pte_pa(pte);
1094
1095 DCFP(pa);
1096 ICPP(pa);
1097 }
1098 #endif
1099 pmap_pte_set_prot(pte, isro);
1100 if (needtflush)
1101 TBIS(sva);
1102 firstpage = false;
1103 }
1104 pte++;
1105 sva += PAGE_SIZE;
1106 }
1107 }
1108 }
1109
1110 /*
1111 * pmap_enter: [ INTERFACE ]
1112 *
1113 * Insert the given physical page (pa) at
1114 * the specified virtual address (va) in the
1115 * target physical map with the protection requested.
1116 *
1117 * If specified, the page will be wired down, meaning
1118 * that the related pte cannot be reclaimed.
1119 *
1120 * Note: This is the only routine which MAY NOT lazy-evaluate
1121 * or lose information. That is, this routine must actually
1122 * insert this page into the given map NOW.
1123 */
1124 int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1126 {
1127 pt_entry_t *pte;
1128 struct pv_entry *opv = NULL;
1129 int npte;
1130 paddr_t opa;
1131 bool cacheable = true;
1132 bool checkpv = true;
1133 bool wired = (flags & PMAP_WIRED) != 0;
1134 bool can_fail = (flags & PMAP_CANFAIL) != 0;
1135
1136 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1137 ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
1138 pmap, va, pa, prot, wired));
1139
1140 #ifdef DIAGNOSTIC
1141 /*
1142 * pmap_enter() should never be used for CADDR1 and CADDR2.
1143 */
1144 if (pmap == pmap_kernel() &&
1145 (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
1146 panic("pmap_enter: used for CADDR1 or CADDR2");
1147 #endif
1148
1149 /*
1150 * For user mapping, allocate kernel VM resources if necessary.
1151 */
1152 if (pmap->pm_ptab == NULL) {
1153 pmap->pm_ptab = (pt_entry_t *)
1154 uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
1155 UVM_KMF_VAONLY |
1156 (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
1157 if (pmap->pm_ptab == NULL)
1158 return ENOMEM;
1159 }
1160
1161 /*
1162 * Segment table entry not valid, we need a new PT page
1163 */
1164 if (!pmap_ste_v(pmap, va)) {
1165 int err = pmap_enter_ptpage(pmap, va, can_fail);
1166 if (err)
1167 return err;
1168 }
1169
1170 pa = m68k_trunc_page(pa);
1171 pte = pmap_pte(pmap, va);
1172 opa = pmap_pte_pa(pte);
1173
1174 PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1175
1176 /*
1177 * Mapping has not changed, must be protection or wiring change.
1178 */
1179 if (opa == pa) {
1180 /*
1181 * Wiring change, just update stats.
1182 * We don't worry about wiring PT pages as they remain
1183 * resident as long as there are valid mappings in them.
1184 * Hence, if a user page is wired, the PT page will be also.
1185 */
1186 if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
1187 PMAP_DPRINTF(PDB_ENTER,
1188 ("enter: wiring change -> %x\n", wired));
1189 if (wired)
1190 pmap->pm_stats.wired_count++;
1191 else
1192 pmap->pm_stats.wired_count--;
1193 }
1194 /*
1195 * Retain cache inhibition status
1196 */
1197 checkpv = false;
1198 if (pmap_pte_ci(pte))
1199 cacheable = false;
1200 goto validate;
1201 }
1202
1203 /*
1204 * Mapping has changed, invalidate old range and fall through to
1205 * handle validating new mapping.
1206 */
1207 if (opa) {
1208 PMAP_DPRINTF(PDB_ENTER,
1209 ("enter: removing old mapping %lx\n", va));
1210 pmap_remove_mapping(pmap, va, pte,
1211 PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE, &opv);
1212 }
1213
1214 /*
1215 * If this is a new user mapping, increment the wiring count
1216 * on this PT page. PT pages are wired down as long as there
1217 * is a valid mapping in the page.
1218 */
1219 if (pmap != pmap_kernel())
1220 pmap_ptpage_addref(trunc_page((vaddr_t)pte));
1221
1222 /*
1223 * Enter on the PV list if part of our managed memory
1224 * Note that we raise IPL while manipulating pv_table
1225 * since pmap_enter can be called at interrupt time.
1226 */
1227 if (PAGE_IS_MANAGED(pa)) {
1228 struct pv_header *pvh;
1229 struct pv_entry *pv, *npv;
1230 int s;
1231
1232 pvh = pa_to_pvh(pa);
1233 pv = &pvh->pvh_first;
1234 s = splvm();
1235
1236 PMAP_DPRINTF(PDB_ENTER,
1237 ("enter: pv at %p: %lx/%p/%p\n",
1238 pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
1239 /*
1240 * No entries yet, use header as the first entry
1241 */
1242 if (pv->pv_pmap == NULL) {
1243 pv->pv_va = va;
1244 pv->pv_pmap = pmap;
1245 pv->pv_next = NULL;
1246 pv->pv_ptste = NULL;
1247 pv->pv_ptpmap = NULL;
1248 pvh->pvh_attrs = 0;
1249 }
1250 /*
1251 * There is at least one other VA mapping this page.
1252 * Place this entry after the header.
1253 */
1254 else {
1255 #ifdef DEBUG
1256 for (npv = pv; npv; npv = npv->pv_next)
1257 if (pmap == npv->pv_pmap && va == npv->pv_va)
1258 panic("pmap_enter: already in pv_tab");
1259 #endif
1260 if (opv != NULL) {
1261 npv = opv;
1262 opv = NULL;
1263 } else {
1264 npv = pmap_alloc_pv();
1265 }
1266 KASSERT(npv != NULL);
1267 npv->pv_va = va;
1268 npv->pv_pmap = pmap;
1269 npv->pv_next = pv->pv_next;
1270 npv->pv_ptste = NULL;
1271 npv->pv_ptpmap = NULL;
1272 pv->pv_next = npv;
1273
1274 #ifdef CACHE_HAVE_VAC
1275
1276 /*
1277 * Since there is another logical mapping for the
1278 * same page we may need to cache-inhibit the
1279 * descriptors on those CPUs with external VACs.
1280 * We don't need to CI if:
1281 *
1282 * - No two mappings belong to the same user pmaps.
1283 * Since the cache is flushed on context switches
1284 * there is no problem between user processes.
1285 *
1286 * - Mappings within a single pmap are a certain
1287 * magic distance apart. VAs at these appropriate
1288 * boundaries map to the same cache entries or
1289 * otherwise don't conflict.
1290 *
1291 * To keep it simple, we only check for these special
1292 * cases if there are only two mappings, otherwise we
1293 * punt and always CI.
1294 *
1295 * Note that there are no aliasing problems with the
1296 * on-chip data-cache when the WA bit is set.
1297 */
1298
1299 if (pmap_aliasmask) {
1300 if (pvh->pvh_attrs & PVH_CI) {
1301 PMAP_DPRINTF(PDB_CACHE,
1302 ("enter: pa %lx already CI'ed\n",
1303 pa));
1304 checkpv = cacheable = false;
1305 } else if (npv->pv_next ||
1306 ((pmap == pv->pv_pmap ||
1307 pmap == pmap_kernel() ||
1308 pv->pv_pmap == pmap_kernel()) &&
1309 ((pv->pv_va & pmap_aliasmask) !=
1310 (va & pmap_aliasmask)))) {
1311 PMAP_DPRINTF(PDB_CACHE,
1312 ("enter: pa %lx CI'ing all\n",
1313 pa));
1314 cacheable = false;
1315 pvh->pvh_attrs |= PVH_CI;
1316 }
1317 }
1318 #endif
1319 }
1320
1321 /*
1322 * Speed pmap_is_referenced() or pmap_is_modified() based
1323 * on the hint provided in access_type.
1324 */
1325 #ifdef DIAGNOSTIC
1326 if ((flags & VM_PROT_ALL) & ~prot)
1327 panic("pmap_enter: access_type exceeds prot");
1328 #endif
1329 if (flags & VM_PROT_WRITE)
1330 pvh->pvh_attrs |= (PG_U|PG_M);
1331 else if (flags & VM_PROT_ALL)
1332 pvh->pvh_attrs |= PG_U;
1333
1334 splx(s);
1335 }
1336 /*
1337 * Assumption: if it is not part of our managed memory
1338 * then it must be device memory which may be volatile.
1339 */
1340 else if (pmap_initialized) {
1341 checkpv = cacheable = false;
1342 }
1343
1344 /*
1345 * Increment counters
1346 */
1347 pmap->pm_stats.resident_count++;
1348 if (wired)
1349 pmap->pm_stats.wired_count++;
1350
1351 validate:
1352 #ifdef CACHE_HAVE_VAC
1353 /*
1354 * Purge kernel side of VAC to ensure we get correct state
1355 * of HW bits so we don't clobber them.
1356 */
1357 if (pmap_aliasmask)
1358 DCIS();
1359 #endif
1360
1361 /*
1362 * Build the new PTE.
1363 */
1364
1365 npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
1366 if (wired)
1367 npte |= PG_W;
1368 if (!checkpv && !cacheable)
1369 #if defined(M68040) || defined(M68060)
1370 #if defined(M68020) || defined(M68030)
1371 npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
1372 #else
1373 npte |= PG_CIN;
1374 #endif
1375 #else
1376 npte |= PG_CI;
1377 #endif
1378 #if defined(M68040) || defined(M68060)
1379 #if defined(M68020) || defined(M68030)
1380 else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
1381 #else
1382 else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
1383 #endif
1384 npte |= PG_CCB;
1385 #endif
1386
1387 PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
1388
1389 /*
1390 * Remember if this was a wiring-only change.
1391 * If so, we need not flush the TLB and caches.
1392 */
1393
1394 wired = ((*pte ^ npte) == PG_W);
1395 #if defined(M68040) || defined(M68060)
1396 #if defined(M68020) || defined(M68030)
1397 if (mmutype == MMU_68040 && !wired)
1398 #else
1399 if (!wired)
1400 #endif
1401 {
1402 DCFP(pa);
1403 ICPP(pa);
1404 }
1405 #endif
1406 *pte = npte;
1407 if (!wired && active_pmap(pmap))
1408 TBIS(va);
1409 #ifdef CACHE_HAVE_VAC
1410 /*
1411 * The following is executed if we are entering a second
1412 * (or greater) mapping for a physical page and the mappings
1413 * may create an aliasing problem. In this case we must
1414 * cache inhibit the descriptors involved and flush any
1415 * external VAC.
1416 */
1417 if (checkpv && !cacheable) {
1418 pmap_changebit(pa, PG_CI, ~0);
1419 DCIA();
1420 #ifdef DEBUG
1421 if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1422 (PDB_CACHE|PDB_PVDUMP))
1423 pmap_pvdump(pa);
1424 #endif
1425 }
1426 #endif
1427 #ifdef DEBUG
1428 if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
1429 pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
1430 #endif
1431
1432 if (opv != NULL)
1433 pmap_free_pv(opv);
1434
1435 return 0;
1436 }
1437
1438 void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1440 {
1441 pmap_t pmap = pmap_kernel();
1442 pt_entry_t *pte;
1443 int s, npte;
1444
1445 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1446 ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
1447
1448 /*
1449 * Segment table entry not valid, we need a new PT page
1450 */
1451
1452 if (!pmap_ste_v(pmap, va)) {
1453 s = splvm();
1454 pmap_enter_ptpage(pmap, va, false);
1455 splx(s);
1456 }
1457
1458 pa = m68k_trunc_page(pa);
1459 pte = pmap_pte(pmap, va);
1460
1461 PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1462 KASSERT(!pmap_pte_v(pte));
1463
1464 /*
1465 * Increment counters
1466 */
1467
1468 pmap->pm_stats.resident_count++;
1469 pmap->pm_stats.wired_count++;
1470
1471 /*
1472 * Build the new PTE.
1473 */
1474
1475 npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
1476 #if defined(M68040) || defined(M68060)
1477 #if defined(M68020) || defined(M68030)
1478 if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
1479 #else
1480 if ((npte & PG_PROT) == PG_RW)
1481 #endif
1482 npte |= PG_CCB;
1483
1484 if (mmutype == MMU_68040) {
1485 DCFP(pa);
1486 ICPP(pa);
1487 }
1488 #endif
1489
1490 *pte = npte;
1491 TBIS(va);
1492 }
1493
1494 void
pmap_kremove(vaddr_t va, vsize_t size)
1496 {
1497 pmap_t pmap = pmap_kernel();
1498 pt_entry_t *pte;
1499 vaddr_t nssva;
1500 vaddr_t eva = va + size;
1501 #ifdef CACHE_HAVE_VAC
1502 bool firstpage, needcflush;
1503 #endif
1504
1505 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
1506 ("pmap_kremove(%lx, %lx)\n", va, size));
1507
1508 #ifdef CACHE_HAVE_VAC
1509 firstpage = true;
1510 needcflush = false;
1511 #endif
1512 while (va < eva) {
1513 nssva = m68k_trunc_seg(va) + NBSEG;
1514 if (nssva == 0 || nssva > eva)
1515 nssva = eva;
1516
1517 /*
1518 * If VA belongs to an unallocated segment,
1519 * skip to the next segment boundary.
1520 */
1521
1522 if (!pmap_ste_v(pmap, va)) {
1523 va = nssva;
1524 continue;
1525 }
1526
1527 /*
1528 * Invalidate every valid mapping within this segment.
1529 */
1530
1531 pte = pmap_pte(pmap, va);
1532 while (va < nssva) {
1533 if (!pmap_pte_v(pte)) {
1534 pte++;
1535 va += PAGE_SIZE;
1536 continue;
1537 }
1538 #ifdef CACHE_HAVE_VAC
1539 if (pmap_aliasmask) {
1540
1541 /*
1542 * Purge kernel side of VAC to ensure
1543 * we get the correct state of any
1544 * hardware maintained bits.
1545 */
1546
1547 if (firstpage) {
1548 DCIS();
1549 firstpage = false;
1550 }
1551
1552 /*
1553 * Remember if we may need to
1554 * flush the VAC.
1555 */
1556
1557 needcflush = true;
1558 }
1559 #endif
1560 pmap->pm_stats.wired_count--;
1561 pmap->pm_stats.resident_count--;
1562 *pte = PG_NV;
1563 TBIS(va);
1564 pte++;
1565 va += PAGE_SIZE;
1566 }
1567 }
1568
1569 #ifdef CACHE_HAVE_VAC
1570
1571 /*
1572 * In a couple of cases, we don't need to worry about flushing
1573 * the VAC:
1574 * 1. if this is a kernel mapping,
1575 * we have already done it
1576 * 2. if it is a user mapping not for the current process,
1577 * it won't be there
1578 */
1579
1580 if (pmap_aliasmask && !active_user_pmap(pmap))
1581 needcflush = false;
1582 if (needcflush) {
1583 if (pmap == pmap_kernel()) {
1584 DCIS();
1585 } else {
1586 DCIU();
1587 }
1588 }
1589 #endif
1590 }
1591
1592 /*
1593 * pmap_unwire: [ INTERFACE ]
1594 *
1595 * Clear the wired attribute for a map/virtual-address pair.
1596 *
1597 * The mapping must already exist in the pmap.
1598 */
1599 void
pmap_unwire(pmap_t pmap, vaddr_t va)
1601 {
1602 pt_entry_t *pte;
1603
1604 PMAP_DPRINTF(PDB_FOLLOW,
1605 ("pmap_unwire(%p, %lx)\n", pmap, va));
1606
1607 pte = pmap_pte(pmap, va);
1608
1609 /*
1610 * If wiring actually changed (always?) clear the wire bit and
1611 * update the wire count. Note that wiring is not a hardware
1612 * characteristic so there is no need to invalidate the TLB.
1613 */
1614
1615 if (pmap_pte_w_chg(pte, 0)) {
1616 pmap_pte_set_w(pte, false);
1617 pmap->pm_stats.wired_count--;
1618 }
1619 }
1620
1621 /*
1622 * pmap_extract: [ INTERFACE ]
1623 *
1624 * Extract the physical address associated with the given
1625 * pmap/virtual address pair.
1626 */
1627 bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1629 {
1630 paddr_t pa;
1631 u_int pte;
1632
1633 PMAP_DPRINTF(PDB_FOLLOW,
1634 ("pmap_extract(%p, %lx) -> ", pmap, va));
1635
1636 if (pmap_ste_v(pmap, va)) {
1637 pte = *(u_int *)pmap_pte(pmap, va);
1638 if (pte) {
1639 pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
1640 if (pap != NULL)
1641 *pap = pa;
1642 #ifdef DEBUG
1643 if (pmapdebug & PDB_FOLLOW)
1644 printf("%lx\n", pa);
1645 #endif
1646 return true;
1647 }
1648 }
1649 #ifdef DEBUG
1650 if (pmapdebug & PDB_FOLLOW)
1651 printf("failed\n");
1652 #endif
1653 return false;
1654 }
1655
1656 /*
1657 * vtophys: [ INTERFACE-ish ]
1658 *
1659 * Kernel virtual to physical. Use with caution.
1660 */
1661 paddr_t
vtophys(vaddr_t va)
1663 {
1664 paddr_t pa;
1665
1666 if (pmap_extract(pmap_kernel(), va, &pa))
1667 return pa;
1668 KASSERT(0);
1669 return (paddr_t) -1;
1670 }
1671
1672 /*
1673 * pmap_copy: [ INTERFACE ]
1674 *
1675 * Copy the mapping range specified by src_addr/len
1676 * from the source map to the range dst_addr/len
1677 * in the destination map.
1678 *
1679 * This routine is only advisory and need not do anything.
1680 */
1681 void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
1683 vaddr_t src_addr)
1684 {
1685
1686 PMAP_DPRINTF(PDB_FOLLOW,
1687 ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
1688 dst_pmap, src_pmap, dst_addr, len, src_addr));
1689 }
1690
1691 /*
1692 * pmap_collect1():
1693 *
1694 * Garbage-collect KPT pages. Helper for the above (bogus)
1695 * pmap_collect().
1696 *
1697 * Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
1698 * WAY OF HANDLING PT PAGES!
1699 */
1700 static inline void
pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
1702 {
1703 paddr_t pa;
1704 struct pv_header *pvh;
1705 struct pv_entry *pv;
1706 pt_entry_t *pte;
1707 paddr_t kpa;
1708 #ifdef DEBUG
1709 st_entry_t *ste;
1710 int opmapdebug = 0;
1711 #endif
1712
1713 for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
1714 struct kpt_page *kpt, **pkpt;
1715
1716 /*
1717 * Locate physical pages which are being used as kernel
1718 * page table pages.
1719 */
1720
1721 pvh = pa_to_pvh(pa);
1722 pv = &pvh->pvh_first;
1723 if (pv->pv_pmap != pmap_kernel() ||
1724 !(pvh->pvh_attrs & PVH_PTPAGE))
1725 continue;
1726 do {
1727 if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
1728 break;
1729 } while ((pv = pv->pv_next));
1730 if (pv == NULL)
1731 continue;
1732 #ifdef DEBUG
1733 if (pv->pv_va < (vaddr_t)Sysmap ||
1734 pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
1735 printf("collect: kernel PT VA out of range\n");
1736 pmap_pvdump(pa);
1737 continue;
1738 }
1739 #endif
1740 pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
1741 while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
1742 ;
1743 if (pte >= (pt_entry_t *)pv->pv_va)
1744 continue;
1745
1746 #ifdef DEBUG
1747 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
1748 printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1749 pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1750 opmapdebug = pmapdebug;
1751 pmapdebug |= PDB_PTPAGE;
1752 }
1753
1754 ste = pv->pv_ptste;
1755 #endif
1756 /*
1757 * If all entries were invalid we can remove the page.
1758 * We call pmap_remove_entry to take care of invalidating
1759 * ST and Sysptmap entries.
1760 */
1761
1762 if (!pmap_extract(pmap, pv->pv_va, &kpa)) {
1763 printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1764 pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1765 panic("pmap_collect: mapping not found");
1766 }
1767 pmap_remove_mapping(pmap, pv->pv_va, NULL,
1768 PRM_TFLUSH|PRM_CFLUSH, NULL);
1769
1770 /*
1771 * Use the physical address to locate the original
1772 * (kmem_alloc assigned) address for the page and put
1773 * that page back on the free list.
1774 */
1775
1776 for (pkpt = &kpt_used_list, kpt = *pkpt;
1777 kpt != NULL;
1778 pkpt = &kpt->kpt_next, kpt = *pkpt)
1779 if (kpt->kpt_pa == kpa)
1780 break;
1781 #ifdef DEBUG
1782 if (kpt == NULL)
1783 panic("pmap_collect: lost a KPT page");
1784 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1785 printf("collect: %lx (%lx) to free list\n",
1786 kpt->kpt_va, kpa);
1787 #endif
1788 *pkpt = kpt->kpt_next;
1789 kpt->kpt_next = kpt_free_list;
1790 kpt_free_list = kpt;
1791 #ifdef DEBUG
1792 if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1793 pmapdebug = opmapdebug;
1794
1795 if (*ste != SG_NV)
1796 printf("collect: kernel STE at %p still valid (%x)\n",
1797 ste, *ste);
1798 ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
1799 if (*ste != SG_NV)
1800 printf("collect: kernel PTmap at %p still valid (%x)\n",
1801 ste, *ste);
1802 #endif
1803 }
1804 }
1805
1806 /*
1807 * pmap_collect:
1808 *
1809 * Helper for pmap_enter_ptpage().
1810 *
1811 * Garbage collects the physical map system for pages which are no
1812 * longer used. Success need not be guaranteed -- that is, there
1813 * may well be pages which are not referenced, but others may be
1814 * collected.
1815 */
1816 static void
pmap_collect(void)
1818 {
1819 int s;
1820 uvm_physseg_t bank;
1821
1822 /*
1823 * XXX This is very bogus. We should handle kernel PT
1824 * XXX pages much differently.
1825 */
1826
1827 s = splvm();
1828 for (bank = uvm_physseg_get_first();
1829 uvm_physseg_valid_p(bank);
1830 bank = uvm_physseg_get_next(bank)) {
1831 pmap_collect1(pmap_kernel(), ptoa(uvm_physseg_get_start(bank)),
1832 ptoa(uvm_physseg_get_end(bank)));
1833 }
1834 splx(s);
1835 }
1836
1837 /*
1838 * pmap_zero_page: [ INTERFACE ]
1839 *
1840 * Zero the specified (machine independent) page by mapping the page
1841 * into virtual memory and using memset to clear its contents, one
1842 * machine dependent page at a time.
1843 *
1844 * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
1845 * (Actually, we go to splvm(), and since we don't
1846 * support multiple processors, this is sufficient.)
1847 */
1848 void
pmap_zero_page(paddr_t phys)
1850 {
1851 int npte;
1852
1853 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
1854
1855 npte = phys | PG_V;
1856 #ifdef CACHE_HAVE_VAC
1857 if (pmap_aliasmask) {
1858
1859 /*
1860 * Cache-inhibit the mapping on VAC machines, as we would
1861 * be wasting the cache load.
1862 */
1863
1864 npte |= PG_CI;
1865 }
1866 #endif
1867
1868 #if defined(M68040) || defined(M68060)
1869 #if defined(M68020) || defined(M68030)
1870 if (mmutype == MMU_68040)
1871 #endif
1872 {
1873 /*
1874 * Set copyback caching on the page; this is required
1875 * for cache consistency (since regular mappings are
1876 * copyback as well).
1877 */
1878
1879 npte |= PG_CCB;
1880 }
1881 #endif
1882
1883 *caddr1_pte = npte;
1884 TBIS((vaddr_t)CADDR1);
1885
1886 zeropage(CADDR1);
1887
1888 #ifdef DEBUG
1889 *caddr1_pte = PG_NV;
1890 TBIS((vaddr_t)CADDR1);
1891 #endif
1892 }
1893
1894 /*
1895 * pmap_copy_page: [ INTERFACE ]
1896 *
1897 * Copy the specified (machine independent) page by mapping the page
1898 * into virtual memory and using memcpy to copy the page, one machine
1899 * dependent page at a time.
1900 *
1901 * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
1902 * (Actually, we go to splvm(), and since we don't
1903 * support multiple processors, this is sufficient.)
1904 */
1905 void
pmap_copy_page(paddr_t src, paddr_t dst)
1907 {
1908 pt_entry_t npte1, npte2;
1909
1910 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
1911
1912 npte1 = src | PG_RO | PG_V;
1913 npte2 = dst | PG_V;
1914 #ifdef CACHE_HAVE_VAC
1915 if (pmap_aliasmask) {
1916
1917 /*
1918 * Cache-inhibit the mapping on VAC machines, as we would
1919 * be wasting the cache load.
1920 */
1921
1922 npte1 |= PG_CI;
1923 npte2 |= PG_CI;
1924 }
1925 #endif
1926
1927 #if defined(M68040) || defined(M68060)
1928 #if defined(M68020) || defined(M68030)
1929 if (mmutype == MMU_68040)
1930 #endif
1931 {
1932 /*
1933 * Set copyback caching on the pages; this is required
1934 * for cache consistency (since regular mappings are
1935 * copyback as well).
1936 */
1937
1938 npte1 |= PG_CCB;
1939 npte2 |= PG_CCB;
1940 }
1941 #endif
1942
1943 *caddr1_pte = npte1;
1944 TBIS((vaddr_t)CADDR1);
1945
1946 *caddr2_pte = npte2;
1947 TBIS((vaddr_t)CADDR2);
1948
1949 copypage(CADDR1, CADDR2);
1950
1951 #ifdef DEBUG
1952 *caddr1_pte = PG_NV;
1953 TBIS((vaddr_t)CADDR1);
1954
1955 *caddr2_pte = PG_NV;
1956 TBIS((vaddr_t)CADDR2);
1957 #endif
1958 }
1959
1960 /*
1961 * pmap_clear_modify: [ INTERFACE ]
1962 *
1963 * Clear the modify bits on the specified physical page.
1964 */
1965 bool
pmap_clear_modify(struct vm_page *pg)
1967 {
1968 paddr_t pa = VM_PAGE_TO_PHYS(pg);
1969
1970 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
1971
1972 return pmap_changebit(pa, 0, (pt_entry_t)~PG_M);
1973 }
1974
1975 /*
1976 * pmap_clear_reference: [ INTERFACE ]
1977 *
1978 * Clear the reference bit on the specified physical page.
1979 */
1980 bool
pmap_clear_reference(struct vm_page *pg)
1982 {
1983 paddr_t pa = VM_PAGE_TO_PHYS(pg);
1984
1985 PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
1986
1987 return pmap_changebit(pa, 0, (pt_entry_t)~PG_U);
1988 }
1989
1990 /*
1991 * pmap_is_referenced: [ INTERFACE ]
1992 *
1993 * Return whether or not the specified physical page is referenced
1994 * by any physical maps.
1995 */
1996 bool
pmap_is_referenced(struct vm_page *pg)
1998 {
1999 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2000
2001 return pmap_testbit(pa, PG_U);
2002 }
2003
2004 /*
2005 * pmap_is_modified: [ INTERFACE ]
2006 *
2007 * Return whether or not the specified physical page is modified
2008 * by any physical maps.
2009 */
2010 bool
pmap_is_modified(struct vm_page *pg)
2012 {
2013 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2014
2015 return pmap_testbit(pa, PG_M);
2016 }
2017
2018 /*
2019 * pmap_phys_address: [ INTERFACE ]
2020 *
2021 * Return the physical address corresponding to the specified
2022 * cookie. Used by the device pager to decode a device driver's
2023 * mmap entry point return value.
2024 *
2025 * Note: no locking is necessary in this function.
2026 */
2027 paddr_t
pmap_phys_address(paddr_t ppn)
2029 {
2030 return m68k_ptob(ppn);
2031 }
2032
2033 #ifdef CACHE_HAVE_VAC
2034 /*
2035 * pmap_prefer: [ INTERFACE ]
2036 *
2037 * Find the first virtual address >= *vap that does not
2038 * cause a virtually-addressed cache alias problem.
2039 */
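/*
 * Worked example (illustrative, assuming an alias span of 64KB, i.e.
 * pmap_aliasmask == 0xffff): for foff == 0x13000 and *vap == 0x20800,
 * d = (0x13000 - 0x20800) & 0xffff == 0x2800, so *vap becomes 0x23000,
 * which is congruent to foff modulo the alias span and therefore cannot
 * alias foff in the VAC.
 */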
2040 void
2041 pmap_prefer(vaddr_t foff, vaddr_t *vap)
2042 {
2043 vaddr_t va;
2044 vsize_t d;
2045
2046 #ifdef M68K_MMU_MOTOROLA
2047 if (pmap_aliasmask)
2048 #endif
2049 {
2050 va = *vap;
2051 d = foff - va;
2052 d &= pmap_aliasmask;
2053 *vap = va + d;
2054 }
2055 }
2056 #endif /* CACHE_HAVE_VAC */
2057
2058 /*
2059 * Miscellaneous support routines follow
2060 */
2061
2062 /*
2063 * pmap_remove_mapping:
2064 *
2065 * Invalidate a single page denoted by pmap/va.
2066 *
2067 * If (pte != NULL), it is the already computed PTE for the page.
2068 *
2069 * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
2070 *
2071 * If (flags & PRM_CFLUSH), we must flush/invalidate any cache
2072 * information.
2073 *
2074 * If (flags & PRM_KEEPPTPAGE), we don't free the page table page
2075 * if the reference drops to zero.
2076 */
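/*
 * (For example, when this routine recurses below to tear down the kernel
 * mapping of a now-empty PT page, it passes PRM_TFLUSH|PRM_CFLUSH and a
 * NULL opvp.)
 */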
2077 /* static */
2078 void
2079 pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags,
2080 struct pv_entry **opvp)
2081 {
2082 paddr_t pa;
2083 struct pv_header *pvh;
2084 struct pv_entry *pv, *npv, *opv = NULL;
2085 struct pmap *ptpmap;
2086 st_entry_t *ste;
2087 int s, bits;
2088 #ifdef DEBUG
2089 pt_entry_t opte;
2090 #endif
2091
2092 PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
2093 ("pmap_remove_mapping(%p, %lx, %p, %x, %p)\n",
2094 pmap, va, pte, flags, opvp));
2095
2096 /*
2097 * PTE not provided, compute it from pmap and va.
2098 */
2099
2100 if (pte == NULL) {
2101 pte = pmap_pte(pmap, va);
2102 if (*pte == PG_NV)
2103 return;
2104 }
2105
2106 #ifdef CACHE_HAVE_VAC
2107 if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
2108
2109 /*
2110 * Purge kernel side of VAC to ensure we get the correct
2111 * state of any hardware maintained bits.
2112 */
2113
2114 DCIS();
2115
2116 /*
2117 * If this is a non-CI user mapping for the current process,
2118 * flush the VAC. Note that the kernel side was flushed
2119 * above so we don't worry about non-CI kernel mappings.
2120 */
2121
2122 if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
2123 DCIU();
2124 }
2125 }
2126 #endif
2127
2128 pa = pmap_pte_pa(pte);
2129 #ifdef DEBUG
2130 opte = *pte;
2131 #endif
2132
2133 /*
2134 * Update statistics
2135 */
2136
2137 if (pmap_pte_w(pte))
2138 pmap->pm_stats.wired_count--;
2139 pmap->pm_stats.resident_count--;
2140
2141 #if defined(M68040) || defined(M68060)
2142 #if defined(M68020) || defined(M68030)
2143 if (mmutype == MMU_68040)
2144 #endif
2145 if ((flags & PRM_CFLUSH)) {
2146 DCFP(pa);
2147 ICPP(pa);
2148 }
2149 #endif
2150
2151 /*
2152 * Invalidate the PTE after saving the reference modify info.
2153 */
2154
2155 PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
2156 bits = *pte & (PG_U|PG_M);
2157 *pte = PG_NV;
2158 if ((flags & PRM_TFLUSH) && active_pmap(pmap))
2159 TBIS(va);
2160
2161 /*
2162 * For user mappings decrement the wiring count on
2163 * the PT page.
2164 */
2165
2166 if (pmap != pmap_kernel()) {
2167 vaddr_t ptpva = trunc_page((vaddr_t)pte);
2168 int refs = pmap_ptpage_delref(ptpva);
2169 #ifdef DEBUG
2170 if (pmapdebug & PDB_WIRING)
2171 pmap_check_wiring("remove", ptpva);
2172 #endif
2173
2174 /*
2175 * If reference count drops to 0, and we're not instructed
2176 * to keep it around, free the PT page.
2177 */
2178
2179 if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
2180 #ifdef DIAGNOSTIC
2181 struct pv_header *ptppvh;
2182 struct pv_entry *ptppv;
2183 #endif
2184 paddr_t ptppa;
2185
2186 ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
2187 #ifdef DIAGNOSTIC
2188 if (PAGE_IS_MANAGED(ptppa) == 0)
2189 panic("pmap_remove_mapping: unmanaged PT page");
2190 ptppvh = pa_to_pvh(ptppa);
2191 ptppv = &ptppvh->pvh_first;
2192 if (ptppv->pv_ptste == NULL)
2193 panic("pmap_remove_mapping: ptste == NULL");
2194 if (ptppv->pv_pmap != pmap_kernel() ||
2195 ptppv->pv_va != ptpva ||
2196 ptppv->pv_next != NULL)
2197 panic("pmap_remove_mapping: "
2198 "bad PT page pmap %p, va 0x%lx, next %p",
2199 ptppv->pv_pmap, ptppv->pv_va,
2200 ptppv->pv_next);
2201 #endif
2202 pmap_remove_mapping(pmap_kernel(), ptpva,
2203 NULL, PRM_TFLUSH|PRM_CFLUSH, NULL);
2204 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2205 uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
2206 rw_exit(uvm_kernel_object->vmobjlock);
2207 PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
2208 ("remove: PT page 0x%lx (0x%lx) freed\n",
2209 ptpva, ptppa));
2210 }
2211 }
2212
2213 /*
2214 * If this isn't a managed page, we are all done.
2215 */
2216
2217 if (PAGE_IS_MANAGED(pa) == 0)
2218 return;
2219
2220 /*
2221 * Otherwise remove it from the PV table
2222 * (raise IPL since we may be called at interrupt time).
2223 */
2224
2225 pvh = pa_to_pvh(pa);
2226 pv = &pvh->pvh_first;
2227 ste = NULL;
2228 s = splvm();
2229
2230 /*
2231 * If it is the first entry on the list, it is actually
2232 * in the header and we must copy the following entry up
2233 * to the header. Otherwise we must search the list for
2234 * the entry. In either case we free the now unused entry.
2235 */
2236
2237 if (pmap == pv->pv_pmap && va == pv->pv_va) {
2238 ste = pv->pv_ptste;
2239 ptpmap = pv->pv_ptpmap;
2240 npv = pv->pv_next;
2241 if (npv) {
2242 *pv = *npv;
2243 opv = npv;
2244 } else
2245 pv->pv_pmap = NULL;
2246 } else {
2247 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
2248 if (pmap == npv->pv_pmap && va == npv->pv_va)
2249 break;
2250 pv = npv;
2251 }
2252 #ifdef DEBUG
2253 if (npv == NULL)
2254 panic("pmap_remove: PA not in pv_tab");
2255 #endif
2256 ste = npv->pv_ptste;
2257 ptpmap = npv->pv_ptpmap;
2258 pv->pv_next = npv->pv_next;
2259 opv = npv;
2260 pvh = pa_to_pvh(pa);
2261 pv = &pvh->pvh_first;
2262 }
2263
2264 #ifdef CACHE_HAVE_VAC
2265
2266 /*
2267 * If only one mapping left we no longer need to cache inhibit
2268 */
2269
2270 if (pmap_aliasmask &&
2271 pv->pv_pmap && pv->pv_next == NULL && (pvh->pvh_attrs & PVH_CI)) {
2272 PMAP_DPRINTF(PDB_CACHE,
2273 ("remove: clearing CI for pa %lx\n", pa));
2274 pvh->pvh_attrs &= ~PVH_CI;
2275 pmap_changebit(pa, 0, (pt_entry_t)~PG_CI);
2276 #ifdef DEBUG
2277 if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
2278 (PDB_CACHE|PDB_PVDUMP))
2279 pmap_pvdump(pa);
2280 #endif
2281 }
2282 #endif
2283
2284 /*
2285 * If this was a PT page we must also remove the
2286 * mapping from the associated segment table.
2287 */
2288
2289 if (ste) {
2290 PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
2291 ("remove: ste was %x@%p pte was %x@%p\n",
2292 *ste, ste, opte, pmap_pte(pmap, va)));
2293 #if defined(M68040) || defined(M68060)
2294 #if defined(M68020) || defined(M68030)
2295 if (mmutype == MMU_68040)
2296 #endif
2297 {
2298 st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
2299
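			/*
			 * A PT page is mapped by NPTEPG/SG4_LEV3SIZE consecutive
			 * level-3 descriptors, which pmap_enter_ptpage() sets up
			 * as a unit, so all of them are invalidated here.
			 */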
2300 while (ste < este)
2301 *ste++ = SG_NV;
2302 #ifdef DEBUG
2303 ste -= NPTEPG/SG4_LEV3SIZE;
2304 #endif
2305 }
2306 #if defined(M68020) || defined(M68030)
2307 else
2308 #endif
2309 #endif
2310 #if defined(M68020) || defined(M68030)
2311 *ste = SG_NV;
2312 #endif
2313
2314 /*
2315 * If it was a user PT page, we decrement the
2316 * reference count on the segment table as well,
2317 * freeing it if it is now empty.
2318 */
2319
2320 if (ptpmap != pmap_kernel()) {
2321 PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
2322 ("remove: stab %p, refcnt %d\n",
2323 ptpmap->pm_stab, ptpmap->pm_sref - 1));
2324 #ifdef DEBUG
2325 if ((pmapdebug & PDB_PARANOIA) &&
2326 ptpmap->pm_stab !=
2327 (st_entry_t *)trunc_page((vaddr_t)ste))
2328 panic("remove: bogus ste");
2329 #endif
2330 if (--(ptpmap->pm_sref) == 0) {
2331 PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
2332 ("remove: free stab %p\n",
2333 ptpmap->pm_stab));
2334 uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
2335 M68K_STSIZE, UVM_KMF_WIRED);
2336 ptpmap->pm_stab = Segtabzero;
2337 ptpmap->pm_stpa = Segtabzeropa;
2338 #if defined(M68040) || defined(M68060)
2339 #if defined(M68020) || defined(M68030)
2340 if (mmutype == MMU_68040)
2341 #endif
2342 ptpmap->pm_stfree = protostfree;
2343 #endif
2344 /*
2345 * Segment table has changed; reload the
2346 * MMU if it's the active user pmap.
2347 */
2348 if (active_user_pmap(ptpmap)) {
2349 pmap_load_urp((paddr_t)ptpmap->pm_stpa);
2350 }
2351 }
2352 }
2353 pvh->pvh_attrs &= ~PVH_PTPAGE;
2354 ptpmap->pm_ptpages--;
2355 }
2356
2357 /*
2358 * Update saved attributes for managed page
2359 */
2360
2361 pvh->pvh_attrs |= bits;
2362 splx(s);
2363
2364 if (opvp != NULL)
2365 *opvp = opv;
2366 else if (opv != NULL)
2367 pmap_free_pv(opv);
2368 }
2369
2370 /*
2371 * pmap_testbit:
2372 *
2373 * Test the modified/referenced bits of a physical page.
2374 */
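/*
 * The attribute bits cached in the pv_header (pvh_attrs) are checked first;
 * pmap_remove_mapping() merges the hardware-maintained U/M bits into them
 * when a mapping is torn down.  Only on a miss are the live PTEs scanned,
 * and a hit found there is cached back into pvh_attrs.
 */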
2375 /* static */
2376 bool
2377 pmap_testbit(paddr_t pa, int bit)
2378 {
2379 struct pv_header *pvh;
2380 struct pv_entry *pv;
2381 pt_entry_t *pte;
2382 int s;
2383
2384 pvh = pa_to_pvh(pa);
2385 pv = &pvh->pvh_first;
2386 s = splvm();
2387
2388 /*
2389 * Check saved info first
2390 */
2391
2392 if (pvh->pvh_attrs & bit) {
2393 splx(s);
2394 return true;
2395 }
2396
2397 #ifdef CACHE_HAVE_VAC
2398
2399 /*
2400 * Flush VAC to get correct state of any hardware maintained bits.
2401 */
2402
2403 if (pmap_aliasmask && (bit & (PG_U|PG_M)))
2404 DCIS();
2405 #endif
2406
2407 /*
2408 * Not found. Check current mappings, returning immediately if
2409 * found. Cache a hit to speed future lookups.
2410 */
2411
2412 if (pv->pv_pmap != NULL) {
2413 for (; pv; pv = pv->pv_next) {
2414 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2415 if (*pte & bit) {
2416 pvh->pvh_attrs |= bit;
2417 splx(s);
2418 return true;
2419 }
2420 }
2421 }
2422 splx(s);
2423 return false;
2424 }
2425
2426 /*
2427 * pmap_changebit:
2428 *
2429 * Change the modified/referenced bits, or other PTE bits,
2430 * for a physical page.
2431 */
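/*
 * Returns true if any cached attribute or live PTE actually changed.  The
 * interface routines above call it with a zero `set' and an inverted mask,
 * e.g. pmap_clear_modify() is pmap_changebit(pa, 0, ~PG_M).
 */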
2432 /* static */
2433 bool
2434 pmap_changebit(paddr_t pa, pt_entry_t set, pt_entry_t mask)
2435 {
2436 struct pv_header *pvh;
2437 struct pv_entry *pv;
2438 pt_entry_t *pte, npte;
2439 vaddr_t va;
2440 int s;
2441 #if defined(CACHE_HAVE_VAC) || defined(M68040) || defined(M68060)
2442 bool firstpage = true;
2443 #endif
2444 bool r;
2445
2446 PMAP_DPRINTF(PDB_BITS,
2447 ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
2448
2449 pvh = pa_to_pvh(pa);
2450 pv = &pvh->pvh_first;
2451 s = splvm();
2452
2453 /*
2454 * Clear saved attributes (modify, reference)
2455 */
2456
2457 r = (pvh->pvh_attrs & ~mask) != 0;
2458 pvh->pvh_attrs &= mask;
2459
2460 /*
2461 * Loop over all current mappings setting/clearing as appropriate
2462 * If setting RO do we need to clear the VAC?
2463 */
2464
2465 if (pv->pv_pmap != NULL) {
2466 #ifdef DEBUG
2467 int toflush = 0;
2468 #endif
2469 for (; pv; pv = pv->pv_next) {
2470 #ifdef DEBUG
2471 toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
2472 #endif
2473 va = pv->pv_va;
2474 pte = pmap_pte(pv->pv_pmap, va);
2475 #ifdef CACHE_HAVE_VAC
2476
2477 /*
2478 * Flush VAC to ensure we get correct state of HW bits
2479 * so we don't clobber them.
2480 */
2481
2482 if (firstpage && pmap_aliasmask) {
2483 firstpage = false;
2484 DCIS();
2485 }
2486 #endif
2487 npte = (*pte | set) & mask;
2488 if (*pte != npte) {
2489 r = true;
2490 #if defined(M68040) || defined(M68060)
2491 /*
2492 * If we are changing caching status or
2493 * protection make sure the caches are
2494 * flushed (but only once).
2495 */
2496 if (firstpage &&
2497 #if defined(M68020) || defined(M68030)
2498 (mmutype == MMU_68040) &&
2499 #endif
2500 ((set == PG_RO) ||
2501 (set & PG_CMASK) ||
2502 (mask & PG_CMASK) == 0)) {
2503 firstpage = false;
2504 DCFP(pa);
2505 ICPP(pa);
2506 }
2507 #endif
2508 *pte = npte;
2509 if (active_pmap(pv->pv_pmap))
2510 TBIS(va);
2511 }
2512 }
2513 }
2514 splx(s);
2515 return r;
2516 }
2517
2518 /*
2519 * pmap_enter_ptpage:
2520 *
2521 * Allocate and map a PT page for the specified pmap/va pair.
2522 */
2523 /* static */
2524 int
2525 pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
2526 {
2527 paddr_t ptpa;
2528 struct vm_page *pg;
2529 struct pv_header *pvh;
2530 struct pv_entry *pv;
2531 st_entry_t *ste;
2532 int s;
2533
2534 PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
2535 ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
2536
2537 /*
2538 * Allocate a segment table if necessary. Note that it is allocated
2539 * from a private map and not pt_map. This keeps user page tables
2540 * aligned on segment boundaries in the kernel address space.
2541 * The segment table is wired down. It will be freed whenever the
2542 * reference count drops to zero.
2543 */
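	/*
	 * (A pmap that has never needed a PT page shares the all-invalid
	 * prototype segment table Segtabzero/Segtabzeropa; pmap_remove_mapping()
	 * points it back there when the last PT page goes away.)
	 */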
2544 if (pmap->pm_stab == Segtabzero) {
2545 pmap->pm_stab = (st_entry_t *)
2546 uvm_km_alloc(st_map, M68K_STSIZE, 0,
2547 UVM_KMF_WIRED | UVM_KMF_ZERO |
2548 (can_fail ? UVM_KMF_NOWAIT : 0));
2549 if (pmap->pm_stab == NULL) {
2550 pmap->pm_stab = Segtabzero;
2551 return ENOMEM;
2552 }
2553 (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
2554 (paddr_t *)&pmap->pm_stpa);
2555 #if defined(M68040) || defined(M68060)
2556 #if defined(M68020) || defined(M68030)
2557 if (mmutype == MMU_68040)
2558 #endif
2559 {
2560 pt_entry_t *pte;
2561
2562 pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
2563 *pte = (*pte & ~PG_CMASK) | PG_CI;
2564 pmap->pm_stfree = protostfree;
2565 }
2566 #endif
2567 /*
2568 * Segment table has changed; reload the
2569 * MMU if it's the active user pmap.
2570 */
2571 if (active_user_pmap(pmap)) {
2572 pmap_load_urp((paddr_t)pmap->pm_stpa);
2573 }
2574
2575 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2576 ("enter: pmap %p stab %p(%p)\n",
2577 pmap, pmap->pm_stab, pmap->pm_stpa));
2578 }
2579
2580 ste = pmap_ste(pmap, va);
2581 #if defined(M68040) || defined(M68060)
2582 /*
2583 * Allocate level 2 descriptor block if necessary
2584 */
2585 #if defined(M68020) || defined(M68030)
2586 if (mmutype == MMU_68040)
2587 #endif
2588 {
2589 if (*ste == SG_NV) {
2590 int ix;
2591 void *addr;
2592
2593 ix = bmtol2(pmap->pm_stfree);
2594 if (ix == -1)
2595 panic("enter: out of address space"); /* XXX */
2596 pmap->pm_stfree &= ~l2tobm(ix);
2597 addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE];
2598 memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
2599 addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
2600 *ste = (u_int)addr | SG_RW | SG_U | SG_V;
2601
2602 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2603 ("enter: alloc ste2 %d(%p)\n", ix, addr));
2604 }
2605 ste = pmap_ste2(pmap, va);
2606 /*
2607 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
2608 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
2609 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
2610 * PT page--the unit of allocation. We set `ste' to point
2611 * to the first entry of that chunk which is validated in its
2612 * entirety below.
2613 */
2614 ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
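		/*
		 * Illustrative arithmetic, assuming 4KB pages and 4-byte
		 * descriptors: PAGE_SIZE/SG4_LEV3SIZE == 4096/64 == 64, so the
		 * mask above rounds `ste' down to a 64-byte boundary, i.e. the
		 * start of a 16-descriptor chunk.
		 */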
2615
2616 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2617 ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
2618 }
2619 #endif
2620 va = trunc_page((vaddr_t)pmap_pte(pmap, va));
2621
2622 /*
2623 * In the kernel we allocate a page from the kernel PT page
2624 * free list and map it into the kernel page table map (via
2625 * pmap_enter).
2626 */
2627 if (pmap == pmap_kernel()) {
2628 struct kpt_page *kpt;
2629
2630 s = splvm();
2631 if ((kpt = kpt_free_list) == NULL) {
2632 /*
2633 * No PT pages available.
2634 * Try once to free up unused ones.
2635 */
2636 PMAP_DPRINTF(PDB_COLLECT,
2637 ("enter: no KPT pages, collecting...\n"));
2638 pmap_collect();
2639 if ((kpt = kpt_free_list) == NULL)
2640 panic("pmap_enter_ptpage: can't get KPT page");
2641 }
2642 kpt_free_list = kpt->kpt_next;
2643 kpt->kpt_next = kpt_used_list;
2644 kpt_used_list = kpt;
2645 ptpa = kpt->kpt_pa;
2646 memset((void *)kpt->kpt_va, 0, PAGE_SIZE);
2647 pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
2648 VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
2649 pmap_update(pmap);
2650 #ifdef DEBUG
2651 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
2652 int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
2653
2654 printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
2655 ix, Sysptmap[ix], kpt->kpt_va);
2656 }
2657 #endif
2658 splx(s);
2659 } else {
2660
2661 /*
2662 * For user processes we just allocate a page from the
2663 * VM system. Note that we set the page "wired" count to 1,
2664 * which is what we use to check if the page can be freed.
2665 * See pmap_remove_mapping().
2666 *
2667 * Count the segment table reference first so that we won't
2668 * lose the segment table when low on memory.
2669 */
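		/*
		 * (pmap_ptpage_addref()/pmap_ptpage_delref() below adjust that
		 * count as user PTEs are entered and removed; pmap_remove_mapping()
		 * frees the page when pmap_ptpage_delref() returns zero and
		 * PRM_KEEPPTPAGE is not set.)
		 */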
2670
2671 pmap->pm_sref++;
2672 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
2673 ("enter: about to alloc UPT pg at %lx\n", va));
2674 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2675 while ((pg = uvm_pagealloc(uvm_kernel_object,
2676 va - vm_map_min(kernel_map),
2677 NULL, UVM_PGA_ZERO)) == NULL) {
2678 rw_exit(uvm_kernel_object->vmobjlock);
2679 if (can_fail) {
2680 pmap->pm_sref--;
2681 return ENOMEM;
2682 }
2683 uvm_wait("ptpage");
2684 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2685 }
2686 rw_exit(uvm_kernel_object->vmobjlock);
2687 pg->flags &= ~(PG_BUSY|PG_FAKE);
2688 UVM_PAGE_OWN(pg, NULL);
2689 ptpa = VM_PAGE_TO_PHYS(pg);
2690 pmap_enter(pmap_kernel(), va, ptpa,
2691 VM_PROT_READ | VM_PROT_WRITE,
2692 VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
2693 pmap_update(pmap_kernel());
2694 }
2695 #if defined(M68040) || defined(M68060)
2696 /*
2697 * Turn off copyback caching of page table pages: the 040/060 table
2698 * walker doesn't look in the data cache, so dirty copyback descriptors could be missed.
2699 */
2700 #if defined(M68020) || defined(M68030)
2701 if (mmutype == MMU_68040)
2702 #endif
2703 {
2704 #ifdef DEBUG
2705 pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
2706 if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
2707 printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
2708 pmap == pmap_kernel() ? "Kernel" : "User",
2709 va, ptpa, pte, *pte);
2710 #endif
2711 if (pmap_changebit(ptpa, PG_CI, (pt_entry_t)~PG_CCB))
2712 DCIS();
2713 }
2714 #endif
2715 /*
2716 * Locate the PV entry in the kernel for this PT page and
2717 * record the STE address. This is so that we can invalidate
2718 * the STE when we remove the mapping for the page.
2719 */
2720 pvh = pa_to_pvh(ptpa);
2721 s = splvm();
2722 if (pvh) {
2723 pv = &pvh->pvh_first;
2724 pvh->pvh_attrs |= PVH_PTPAGE;
2725 do {
2726 if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
2727 break;
2728 } while ((pv = pv->pv_next));
2729 } else {
2730 pv = NULL;
2731 }
2732 #ifdef DEBUG
2733 if (pv == NULL)
2734 panic("pmap_enter_ptpage: PT page not entered");
2735 #endif
2736 pv->pv_ptste = ste;
2737 pv->pv_ptpmap = pmap;
2738
2739 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
2740 ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
2741
2742 /*
2743 * Map the new PT page into the segment table.
2744 * Also increment the reference count on the segment table if this
2745 * was a user page table page. Note that we don't use vm_map_pageable
2746 * to keep the count like we do for PT pages; this is mostly because
2747 * it would be difficult to identify ST pages in pmap_pageable to
2748 * release them. We also avoid the overhead of vm_map_pageable.
2749 */
2750 #if defined(M68040) || defined(M68060)
2751 #if defined(M68020) || defined(M68030)
2752 if (mmutype == MMU_68040)
2753 #endif
2754 {
2755 st_entry_t *este;
2756
2757 for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
2758 *ste = ptpa | SG_U | SG_RW | SG_V;
2759 ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
2760 }
2761 }
2762 #if defined(M68020) || defined(M68030)
2763 else
2764 *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2765 #endif
2766 #else
2767 *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2768 #endif
2769 if (pmap != pmap_kernel()) {
2770 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2771 ("enter: stab %p refcnt %d\n",
2772 pmap->pm_stab, pmap->pm_sref));
2773 }
2774 /*
2775 * Flush stale TLB info.
2776 */
2777 if (pmap == pmap_kernel())
2778 TBIAS();
2779 else
2780 TBIAU();
2781 pmap->pm_ptpages++;
2782 splx(s);
2783
2784 return 0;
2785 }
2786
2787 /*
2788 * pmap_ptpage_addref:
2789 *
2790 * Add a reference to the specified PT page.
2791 */
2792 void
2793 pmap_ptpage_addref(vaddr_t ptpva)
2794 {
2795 struct vm_page *pg;
2796
2797 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2798 pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
2799 pg->wire_count++;
2800 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2801 ("ptpage addref: pg %p now %d\n",
2802 pg, pg->wire_count));
2803 rw_exit(uvm_kernel_object->vmobjlock);
2804 }
2805
2806 /*
2807 * pmap_ptpage_delref:
2808 *
2809 * Delete a reference to the specified PT page.
2810 */
2811 int
2812 pmap_ptpage_delref(vaddr_t ptpva)
2813 {
2814 struct vm_page *pg;
2815 int rv;
2816
2817 rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2818 pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
2819 rv = --pg->wire_count;
2820 PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2821 ("ptpage delref: pg %p now %d\n",
2822 pg, pg->wire_count));
2823 rw_exit(uvm_kernel_object->vmobjlock);
2824 return rv;
2825 }
2826
2827 /*
2828 * Routine: pmap_procwr
2829 *
2830 * Function:
2831 * Synchronize caches corresponding to [addr, addr + len) in p.
2832 */
2833 void
2834 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2835 {
2836
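	/*
	 * 0x80000004 is assumed here to be CC_EXTPURGE|CC_IPURGE from
	 * <m68k/cachectl.h>: purge the on-chip (and any external) instruction
	 * cache for the range so newly written code is fetched correctly.
	 */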
2837 (void)cachectl1(0x80000004, va, len, p);
2838 }
2839
2840 void
2841 _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
2842 {
2843
2844 if (!pmap_ste_v(pmap, va))
2845 return;
2846
2847 #if defined(M68040) || defined(M68060)
2848 #if defined(M68020) || defined(M68030)
2849 if (mmutype == MMU_68040) {
2850 #endif
2851 if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB,
2852 (pt_entry_t)~PG_CI))
2853 DCIS();
2854
2855 #if defined(M68020) || defined(M68030)
2856 } else
2857 pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0,
2858 (pt_entry_t)~PG_CI);
2859 #endif
2860 #else
2861 pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0,
2862 (pt_entry_t)~PG_CI);
2863 #endif
2864 }
2865
2866 void
2867 _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
2868 {
2869
2870 if (!pmap_ste_v(pmap, va))
2871 return;
2872
2873 #if defined(M68040) || defined(M68060)
2874 #if defined(M68020) || defined(M68030)
2875 if (mmutype == MMU_68040) {
2876 #endif
2877 if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI,
2878 (pt_entry_t)~PG_CCB))
2879 DCIS();
2880 #if defined(M68020) || defined(M68030)
2881 } else
2882 pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
2883 #endif
2884 #else
2885 pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
2886 #endif
2887 }
2888
2889 int
2890 _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
2891 {
2892
2893 if (!pmap_ste_v(pmap, va))
2894 return 0;
2895
2896 return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
2897 }
2898
2899 #ifdef DEBUG
2900 /*
2901 * pmap_pvdump:
2902 *
2903 * Dump the contents of the PV list for the specified physical page.
2904 */
2905 void
2906 pmap_pvdump(paddr_t pa)
2907 {
2908 struct pv_header *pvh;
2909 struct pv_entry *pv;
2910
2911 printf("pa %lx", pa);
2912 pvh = pa_to_pvh(pa);
2913 for (pv = &pvh->pvh_first; pv; pv = pv->pv_next)
2914 printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p",
2915 pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap);
2916 printf("\n");
2917 }
2918
2919 /*
2920 * pmap_check_wiring:
2921 *
2922 * Count the number of valid mappings in the specified PT page,
2923 * and ensure that it is consistent with the number of wirings
2924 * to that page that the VM system has.
2925 */
2926 void
2927 pmap_check_wiring(const char *str, vaddr_t va)
2928 {
2929 pt_entry_t *pte;
2930 paddr_t pa;
2931 struct vm_page *pg;
2932 int count;
2933
2934 if (!pmap_ste_v(pmap_kernel(), va) ||
2935 !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
2936 return;
2937
2938 pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
2939 pg = PHYS_TO_VM_PAGE(pa);
2940 if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) {
2941 panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
2942 }
2943
2944 count = 0;
2945 for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
2946 pte++)
2947 if (*pte)
2948 count++;
2949 if (pg->wire_count != count)
2950 panic("*%s*: 0x%lx: w%d/a%d",
2951 str, va, pg->wire_count, count);
2952 }
2953 #endif /* DEBUG */
2954
2955 /*
2956 * XXX XXX XXX These are legacy remnants and should go away XXX XXX XXX
2957 * (Cribbed from vm_machdep.c because they're tied to this pmap impl.)
2958 */
2959
2960 /*
2961 * Map `size' bytes of physical memory starting at `paddr' into
2962 * kernel VA space at `vaddr'. Read/write and cache-inhibit status
2963 * are specified by `prot'.
2964 */
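/*
 * For example (illustrative only; `regs_va'/`regs_pa' are hypothetical), a
 * driver could map a page of device registers writable and cache-inhibited
 * with physaccess(regs_va, regs_pa, PAGE_SIZE, PG_CI); PG_V is supplied by
 * the routine, and PG_RO could be added for a read-only window.
 */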
2965 void
2966 physaccess(void *vaddr, void *paddr, int size, int prot)
2967 {
2968 pt_entry_t *pte;
2969 u_int page;
2970
2971 pte = kvtopte(vaddr);
2972 page = (u_int)paddr & PG_FRAME;
2973 for (size = btoc(size); size; size--) {
2974 *pte++ = PG_V | prot | page;
2975 page += PAGE_SIZE;
2976 }
2977 TBIAS();
2978 }
2979
2980 void
2981 physunaccess(void *vaddr, int size)
2982 {
2983 pt_entry_t *pte;
2984
2985 pte = kvtopte(vaddr);
2986 for (size = btoc(size); size; size--)
2987 *pte++ = PG_NV;
2988 TBIAS();
2989 }
2990
2991 /*
2992 * Convert kernel VA to physical address
2993 */
2994 int
2995 kvtop(void *addr)
2996 {
2997 return (int)vtophys((vaddr_t)addr);
2998 }
2999