1 /* $NetBSD: pmap.c,v 1.52 2023/12/27 17:35:36 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Adam Glass, Gordon W. Ross, and Matthew Fredette.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Some notes:
34 *
35 * sun2s have contexts (8). In this pmap design, the kernel is mapped
36 * into context zero. Processes take up a known portion of the context,
 * and compete for the available contexts on an LRU basis.
38 *
39 * sun2s also have this evil "PMEG" crapola. Essentially each "context"'s
40 * address space is defined by the 512 one-byte entries in the segment map.
41 * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
 * which contains the mappings for that virtual segment.  (This strange
 * terminology was invented by Sun and is preserved here for consistency.)
 * Each PMEG maps a segment of 32KB length, with 16 pages of 2KB each.
45 *
46 * As you might guess, these PMEGs are in short supply and heavy demand.
47 * PMEGs allocated to the kernel are "static" in the sense that they can't
48 * be stolen from it. PMEGs allocated to a particular segment of a
49 * pmap's virtual space will be fought over by the other pmaps.
50 *
51 * This pmap was once sys/arch/sun3/sun3/pmap.c revision 1.135.
52 */
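/*
 * Concretely: 512 segmap entries times 32KB per segment gives each
 * context a 16MB virtual address space, and each PMEG holds 16 PTEs
 * covering 2KB pages (16 x 2KB = 32KB, one segment).
 */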
53
54 /*
55 * Cache management:
 * sun2s don't have cache implementations, but for now the caching
 * code remains in place.  It's harmless (and, due to our 0 definitions of
 * PG_NC and BADALIAS, should optimize away), and keeping it in makes
 * it easier to diff this file against its cousin, sys/arch/sun3/sun3/pmap.c.
60 */
61
62 /*
63 * wanted attributes:
64 * pmegs that aren't needed by a pmap remain in the MMU.
65 * quick context switches between pmaps
66 */
67
68 /*
69 * Project1: Use a "null" context for processes that have not
70 * touched any user-space address recently. This is efficient
71 * for things that stay in the kernel for a while, waking up
 * to handle some I/O and then going back to sleep (e.g. nfsd).
73 * If and when such a process returns to user-mode, it will
74 * fault and be given a real context at that time.
75 *
76 * This also lets context switch be fast, because all we need
77 * to do there for the MMU is slam the context register.
78 *
79 * Project2: Use a private pool of PV elements. This pool can be
80 * fixed size because the total mapped virtual space supported by
81 * the MMU H/W (and this pmap) is fixed for all time.
82 */
83
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.52 2023/12/27 17:35:36 thorpej Exp $");
86
87 #include "opt_ddb.h"
88 #include "opt_pmap_debug.h"
89
90 #include <sys/param.h>
91 #include <sys/systm.h>
92 #include <sys/proc.h>
93 #include <sys/kmem.h>
94 #include <sys/pool.h>
95 #include <sys/queue.h>
96 #include <sys/kcore.h>
97 #include <sys/atomic.h>
98
99 #include <uvm/uvm.h>
100
101 #include <machine/cpu.h>
102 #include <machine/dvma.h>
103 #include <machine/idprom.h>
104 #include <machine/kcore.h>
105 #include <machine/promlib.h>
106 #include <machine/pmap.h>
107 #include <machine/pte.h>
108 #include <machine/vmparam.h>
109
110 #include <sun2/sun2/control.h>
111 #include <sun2/sun2/machdep.h>
112
113 #ifdef DDB
114 #include <ddb/db_output.h>
115 #else
116 #define db_printf printf
117 #endif
118
119 /* Verify this correspondence between definitions. */
120 #if (PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
121 #error "PMAP_XXX definitions don't match pte.h!"
122 #endif
123
124 /* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */
125 #define PMAP_TYPE PMAP_MBIO
126
127 /*
128 * Local convenience macros
129 */
130
131 #define DVMA_MAP_END (DVMA_MAP_BASE + DVMA_MAP_AVAIL)
132
133 /* User segments are all of them. */
134 #define NUSEG (NSEGMAP)
135
136 #define VA_SEGNUM(x) ((u_int)(x) >> SEGSHIFT)
137
138 /*
139 * Only "main memory" pages are registered in the pv_lists.
140 * This macro is used to determine if a given pte refers to
141 * "main memory" or not. One slight hack here deserves more
142 * explanation: On the Sun-2, the bwtwo and zs1 appear
143 * as PG_OBMEM devices at 0x00700000 and 0x00780000,
144 * respectively. We do not want to consider these as
145 * "main memory" so the macro below treats obmem addresses
146 * >= 0x00700000 as device addresses. NB: this means for now,
147 * you can't have a headless Sun-2 with 8MB of main memory.
148 */
149 #define IS_MAIN_MEM(pte) (((pte) & PG_TYPE) == 0 && PG_PA(pte) < 0x00700000)
150
151 /* Does this (pseudo) PA represent device space? */
152 #define PA_IS_DEV(pa) (((pa) & PMAP_TYPE) != 0 || (pa) >= 0x00700000)
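/*
 * For example, a PTE mapping the zs1 at 0x00780000 (mentioned above)
 * fails IS_MAIN_MEM(), and that address satisfies PA_IS_DEV(), while
 * an ordinary RAM address such as 0x00100000 with no type bits set is
 * treated as main memory by both macros.
 */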
153
154 /*
155 * Is there a Virtually Addressed Cache (VAC) alias problem
156 * if one page is mapped at both a1 and a2?
157 */
158 #define BADALIAS(a1, a2) (0)
159
160
161 /*
162 * Debugging support.
163 */
164 #define PMD_ENTER 1
165 #define PMD_LINK 2
166 #define PMD_PROTECT 4
167 #define PMD_SWITCH 8
168 #define PMD_COW 0x10
169 #define PMD_MODBIT 0x20
170 #define PMD_REFBIT 0x40
171 #define PMD_WIRING 0x80
172 #define PMD_CONTEXT 0x100
173 #define PMD_CREATE 0x200
174 #define PMD_SEGMAP 0x400
175 #define PMD_SETPTE 0x800
176 #define PMD_FAULT 0x1000
177 #define PMD_KMAP 0x2000
178
179 #define PMD_REMOVE PMD_ENTER
180 #define PMD_UNLINK PMD_LINK
181
182 #ifdef PMAP_DEBUG
183 int pmap_debug = 0;
184 int pmap_db_watchva = -1;
185 int pmap_db_watchpmeg = -1;
186 #endif /* PMAP_DEBUG */
187
188 /*
189 * Miscellaneous variables.
190 *
191 * For simplicity, this interface retains the variables
192 * that were used in the old interface (without NONCONTIG).
193 * These are set in pmap_bootstrap() and used in
194 * pmap_next_page().
195 */
196 vaddr_t virtual_avail, virtual_end;
197 paddr_t avail_start, avail_end;
198 #define managed(pa) (((pa) >= avail_start) && ((pa) < avail_end))
199
200 /* used to skip a single hole in RAM */
201 static vaddr_t hole_start, hole_size;
202
203 /* This is for pmap_next_page() */
204 static paddr_t avail_next;
205
206 /* This is where we map a PMEG without a context. */
207 static vaddr_t temp_seg_va;
208 #ifdef DIAGNOSTIC
209 static int temp_seg_inuse;
210 #endif
211
212 /*
213 * Location to store virtual addresses
214 * to be used in copy/zero operations.
215 */
216 vaddr_t tmp_vpages[2] = {
217 PAGE_SIZE * 8,
218 PAGE_SIZE * 9 };
219 int tmp_vpages_inuse;
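/*
 * pmap_bootstrap() reserves virtual pages 8 and 9 for this purpose
 * and leaves them invalid; the copy/zero code installs temporary
 * PTEs here, and tmp_vpages_inuse is meant to catch nested use.
 */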
220
221 static int pmap_version = 1;
222 static struct pmap kernel_pmap_store;
223 struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
224 #define kernel_pmap (kernel_pmap_ptr)
225 static u_char kernel_segmap[NSEGMAP];
226
227 /* memory pool for pmap structures */
228 struct pool pmap_pmap_pool;
229
230 /* statistics... */
231 struct pmap_stats {
232 int ps_enter_firstpv; /* pv heads entered */
233 int ps_enter_secondpv; /* pv nonheads entered */
234 int ps_unlink_pvfirst; /* of pv_unlinks on head */
235 int ps_unlink_pvsearch; /* of pv_unlink searches */
236 int ps_pmeg_faultin; /* pmegs reloaded */
237 int ps_changeprots; /* of calls to changeprot */
238 int ps_changewire; /* useless wiring changes */
239 int ps_npg_prot_all; /* of active pages protected */
240 int ps_npg_prot_actual; /* pages actually affected */
241 int ps_vac_uncached; /* non-cached due to bad alias */
242 int ps_vac_recached; /* re-cached when bad alias gone */
243 } pmap_stats;
244
245 #ifdef PMAP_DEBUG
246 #define CHECK_SPL() do { \
247 if ((getsr() & PSL_IPL) < PSL_IPL4) \
248 panic("pmap: bad spl, line %d", __LINE__); \
249 } while (0)
250 #else /* PMAP_DEBUG */
251 #define CHECK_SPL() (void)0
252 #endif /* PMAP_DEBUG */
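/*
 * Most internal routines below expect to be entered with the
 * interrupt priority already raised (e.g. via splvm()); under
 * PMAP_DEBUG, CHECK_SPL() panics if the IPL has dropped below 4.
 */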
253
254
255 /*
256 * PV support.
257 * (i.e. Find all virtual mappings of a physical page.)
258 */
259
260 int pv_initialized = 0;
261
262 /* One of these for each mapped virtual page. */
263 struct pv_entry {
264 struct pv_entry *pv_next;
265 pmap_t pv_pmap;
266 vaddr_t pv_va;
267 };
268 typedef struct pv_entry *pv_entry_t;
269
270 /* Table of PV list heads (per physical page). */
271 static struct pv_entry **pv_head_tbl;
272
273 /* Free list of PV entries. */
274 static struct pv_entry *pv_free_list;
275
276 /* Table of flags (per physical page). */
277 static u_char *pv_flags_tbl;
278
279 /* These are as in the MMU but shifted by PV_SHIFT. */
280 #define PV_SHIFT 20
281 #define PV_VALID (PG_VALID >> PV_SHIFT)
282 #define PV_NC (PG_NC >> PV_SHIFT)
283 #define PV_TYPE (PG_TYPE >> PV_SHIFT)
284 #define PV_REF (PG_REF >> PV_SHIFT)
285 #define PV_MOD (PG_MOD >> PV_SHIFT)
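/*
 * Shifting the PTE bits down by PV_SHIFT packs the interesting
 * per-page attributes (valid, no-cache, type, referenced, modified)
 * into the single u_char per physical page kept in pv_flags_tbl;
 * save_modref_bits() and pv_syncflags() apply the same shift when
 * harvesting PG_MODREF from hardware PTEs.
 */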
286
287
288 /*
289 * context structures, and queues
290 */
291
292 struct context_state {
293 TAILQ_ENTRY(context_state) context_link;
294 int context_num;
295 struct pmap *context_upmap;
296 };
297 typedef struct context_state *context_t;
298
299 #define INVALID_CONTEXT -1 /* impossible value */
300 #define EMPTY_CONTEXT 0
301 #define KERNEL_CONTEXT 0
302 #define FIRST_CONTEXT 1
303 #define has_context(pmap) (((pmap)->pm_ctxnum != EMPTY_CONTEXT) == ((pmap) != kernel_pmap))
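/*
 * has_context() is true for the kernel pmap (which permanently owns
 * context zero) and for a user pmap only while it currently holds a
 * hardware context other than EMPTY_CONTEXT.
 */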
304
TAILQ_HEAD(context_tailq, context_state)
306 context_free_queue, context_active_queue;
307
308 static struct context_state context_array[NCONTEXT];
309
310
311 /*
312 * PMEG structures, queues, and macros
313 */
314 #define PMEGQ_FREE 0
315 #define PMEGQ_INACTIVE 1
316 #define PMEGQ_ACTIVE 2
317 #define PMEGQ_KERNEL 3
318 #define PMEGQ_NONE 4
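/*
 * Life cycle of a PMEG: it starts on the free queue, moves to the
 * kernel or active queue when pmeg_allocate() hands it out, drops to
 * the inactive queue when its owner loses its context (pmeg_release),
 * and returns to the free queue via pmeg_free().  PMEGQ_NONE marks
 * PMEGs held outside the queues (see pmeg_reserve).
 */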
319
320 struct pmeg_state {
321 TAILQ_ENTRY(pmeg_state) pmeg_link;
322 int pmeg_index;
323 pmap_t pmeg_owner;
324 int pmeg_version;
325 vaddr_t pmeg_va;
326 int pmeg_wired;
327 int pmeg_reserved;
328 int pmeg_vpages;
329 int pmeg_qstate;
330 };
331
332 typedef struct pmeg_state *pmeg_t;
333
334 #define PMEG_INVAL (NPMEG-1)
335 #define PMEG_NULL (pmeg_t) NULL
336
337 /* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
338 TAILQ_HEAD(pmeg_tailq, pmeg_state)
339 pmeg_free_queue, pmeg_inactive_queue,
340 pmeg_active_queue, pmeg_kernel_queue;
341
342 static struct pmeg_state pmeg_array[NPMEG];
343
344
345 /*
346 * prototypes
347 */
348 static int get_pte_pmeg(int, int);
349 static void set_pte_pmeg(int, int, int);
350
351 static void context_allocate(pmap_t);
352 static void context_free(pmap_t);
353 static void context_init(void);
354
355 static void pmeg_init(void);
356 static void pmeg_reserve(int);
357
358 static pmeg_t pmeg_allocate(pmap_t, vaddr_t);
359 static void pmeg_mon_init(vaddr_t, vaddr_t, int);
360 static void pmeg_release(pmeg_t);
361 static void pmeg_free(pmeg_t);
362 static pmeg_t pmeg_cache(pmap_t, vaddr_t);
363 static void pmeg_set_wiring(pmeg_t, vaddr_t, int);
364
365 static int pv_link (pmap_t, int, vaddr_t);
366 static void pv_unlink(pmap_t, int, vaddr_t);
367 static void pv_remove_all(paddr_t);
368 static void pv_changepte(paddr_t, int, int);
369 static u_int pv_syncflags(pv_entry_t);
370 static void pv_init(void);
371
372 static void pmeg_clean(pmeg_t);
373 static void pmeg_clean_free(void);
374
375 static void pmap_common_init(pmap_t);
376 static void pmap_kernel_init(pmap_t);
377 static void pmap_user_init(pmap_t);
378 static void pmap_page_upload(void);
379
380 static void pmap_enter_kernel(vaddr_t, int, bool);
381 static void pmap_enter_user(pmap_t, vaddr_t, int, bool);
382
383 static void pmap_protect1(pmap_t, vaddr_t, vaddr_t);
384 static void pmap_protect_mmu(pmap_t, vaddr_t, vaddr_t);
385 static void pmap_protect_noctx(pmap_t, vaddr_t, vaddr_t);
386
387 static void pmap_remove1(pmap_t, vaddr_t, vaddr_t);
388 static void pmap_remove_mmu(pmap_t, vaddr_t, vaddr_t);
389 static void pmap_remove_noctx(pmap_t, vaddr_t, vaddr_t);
390
391 static int pmap_fault_reload(struct pmap *, vaddr_t, int);
392
393 /* Called only from locore.s and pmap.c */
394 void _pmap_switch(pmap_t);
395
396 #ifdef PMAP_DEBUG
397 void pmap_print(pmap_t);
398 void pv_print(paddr_t);
399 void pmeg_print(pmeg_t);
400 static void pmeg_verify_empty(vaddr_t);
401 #endif /* PMAP_DEBUG */
402 void pmap_pinit(pmap_t);
403 void pmap_release(pmap_t);
404
405 /*
406 * Various in-line helper functions.
407 */
408
409 static inline pmap_t
current_pmap(void)
411 {
412 struct vmspace *vm;
413 struct vm_map *map;
414 pmap_t pmap;
415
416 vm = curproc->p_vmspace;
417 map = &vm->vm_map;
418 pmap = vm_map_pmap(map);
419
420 return (pmap);
421 }
422
423 static inline struct pv_entry **
pa_to_pvhead(paddr_t pa)
425 {
426 int idx;
427
428 idx = PA_PGNUM(pa);
429 #ifdef DIAGNOSTIC
430 if (PA_IS_DEV(pa) || (idx >= physmem))
431 panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
432 #endif
433 return (&pv_head_tbl[idx]);
434 }
435
436 static inline u_char *
pa_to_pvflags(paddr_t pa)
438 {
439 int idx;
440
441 idx = PA_PGNUM(pa);
442 #ifdef DIAGNOSTIC
443 if (PA_IS_DEV(pa) || (idx >= physmem))
444 panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa);
445 #endif
446 return (&pv_flags_tbl[idx]);
447 }
448
449 /*
450 * Save the MOD bit from the given PTE using its PA
451 */
452 static inline void
save_modref_bits(int pte)
454 {
455 u_char *pv_flags;
456
457 pv_flags = pa_to_pvflags(PG_PA(pte));
458 *pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
459 }
460
461 static inline pmeg_t
pmeg_p(int sme)
463 {
464 #ifdef DIAGNOSTIC
465 if (sme < 0 || sme >= SEGINV)
466 panic("pmeg_p: bad sme");
467 #endif
468 return &pmeg_array[sme];
469 }
470
471 #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
472
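/*
 * pmeg_wired is used as a bit mask with one bit per page in the
 * segment: is_pmeg_wired() above only asks whether any bit is set,
 * while pmeg_set_wiring() maintains the individual bits.
 */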
473 static void
pmeg_set_wiring(pmeg_t pmegp, vaddr_t va, int flag)
475 {
476 int idx, mask;
477
478 idx = VA_PTE_NUM(va);
479 mask = 1 << idx;
480
481 if (flag)
482 pmegp->pmeg_wired |= mask;
483 else
484 pmegp->pmeg_wired &= ~mask;
485 }
486
487 /****************************************************************
488 * Context management functions.
489 */
490
491 /* part of pmap_bootstrap */
492 static void
context_init(void)
494 {
495 int i;
496
497 TAILQ_INIT(&context_free_queue);
498 TAILQ_INIT(&context_active_queue);
499
500 /* Leave EMPTY_CONTEXT out of the free list. */
501 context_array[0].context_upmap = kernel_pmap;
502
503 for (i = 1; i < NCONTEXT; i++) {
504 context_array[i].context_num = i;
505 context_array[i].context_upmap = NULL;
506 TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
507 context_link);
508 #ifdef PMAP_DEBUG
509 if (pmap_debug & PMD_CONTEXT)
510 printf("context_init: sizeof(context_array[0])=%d\n",
511 sizeof(context_array[0]));
512 #endif
513 }
514 }
515
516 /* Get us a context (steal one if necessary). */
517 static void
context_allocate(pmap_t pmap)
519 {
520 context_t context;
521
522 CHECK_SPL();
523
524 #ifdef DIAGNOSTIC
525 if (pmap == kernel_pmap)
526 panic("context_allocate: kernel_pmap");
527 if (has_context(pmap))
528 panic("pmap: pmap already has context allocated to it");
529 #endif
530
531 context = TAILQ_FIRST(&context_free_queue);
532 if (context == NULL) {
533 /* Steal the head of the active queue. */
534 context = TAILQ_FIRST(&context_active_queue);
535 if (context == NULL)
536 panic("pmap: no contexts left?");
537 #ifdef PMAP_DEBUG
538 if (pmap_debug & PMD_CONTEXT)
539 printf("context_allocate: steal ctx %d from pmap %p\n",
540 context->context_num, context->context_upmap);
541 #endif
542 context_free(context->context_upmap);
543 context = TAILQ_FIRST(&context_free_queue);
544 }
545 TAILQ_REMOVE(&context_free_queue, context, context_link);
546
547 #ifdef DIAGNOSTIC
548 if (context->context_upmap != NULL)
549 panic("pmap: context in use???");
550 #endif
551
552 context->context_upmap = pmap;
553 pmap->pm_ctxnum = context->context_num;
554
555 TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);
556
557 /*
558 * We could reload the MMU here, but that would
559 * artificially move PMEGs from the inactive queue
560 * to the active queue, so do lazy reloading.
561 * XXX - Need to reload wired pmegs though...
562 * XXX: Verify the context it is empty?
563 */
564 }
565
566 /*
567 * Unload the context and put it on the free queue.
568 */
569 static void
context_free(pmap_t pmap)
571 {
572 int saved_ctxnum, ctxnum;
573 int i, sme;
574 context_t contextp;
575 vaddr_t va;
576
577 CHECK_SPL();
578
579 ctxnum = pmap->pm_ctxnum;
580 if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT)
581 panic("pmap: context_free ctxnum");
582 contextp = &context_array[ctxnum];
583
584 /* Temporary context change. */
585 saved_ctxnum = get_context();
586 set_context(ctxnum);
587
588 /* Before unloading translations, flush cache. */
589 #ifdef HAVECACHE
590 if (cache_size)
591 cache_flush_context();
592 #endif
593
594 /* Unload MMU (but keep in SW segmap). */
595 for (i = 0, va = 0; i < NUSEG; i++, va += NBSG) {
596
597 #if !defined(PMAP_DEBUG)
598 /* Short-cut using the S/W segmap (if !debug). */
599 if (pmap->pm_segmap[i] == SEGINV)
600 continue;
601 #endif
602
603 /* Check the H/W segmap. */
604 sme = get_segmap(va);
605 if (sme == SEGINV)
606 continue;
607
608 /* Found valid PMEG in the segmap. */
609 #ifdef PMAP_DEBUG
610 if (pmap_debug & PMD_SEGMAP)
611 printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x "
612 "new=ff (cf)\n", ctxnum, va, sme);
613 #endif
614 #ifdef DIAGNOSTIC
615 if (sme != pmap->pm_segmap[i])
616 panic("context_free: unknown sme at va=0x%lx", va);
617 #endif
618 /* Did cache flush above (whole context). */
619 set_segmap(va, SEGINV);
620 /* In this case, do not clear pm_segmap. */
621 /* XXX: Maybe inline this call? */
622 pmeg_release(pmeg_p(sme));
623 }
624
625 /* Restore previous context. */
626 set_context(saved_ctxnum);
627
628 /* Dequeue, update, requeue. */
629 TAILQ_REMOVE(&context_active_queue, contextp, context_link);
630 pmap->pm_ctxnum = EMPTY_CONTEXT;
631 contextp->context_upmap = NULL;
632 TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link);
633 }
634
635
636 /****************************************************************
637 * PMEG management functions.
638 */
639
640 static void
pmeg_init(void)
642 {
643 int x;
644
	/* clear pmeg array, put it all on the free pmeg queue */
646
647 TAILQ_INIT(&pmeg_free_queue);
648 TAILQ_INIT(&pmeg_inactive_queue);
649 TAILQ_INIT(&pmeg_active_queue);
650 TAILQ_INIT(&pmeg_kernel_queue);
651
652 memset(pmeg_array, 0, NPMEG*sizeof(struct pmeg_state));
653 for (x = 0; x < NPMEG; x++) {
654 TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x], pmeg_link);
655 pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
656 pmeg_array[x].pmeg_index = x;
657 }
658
659 /* The last pmeg is not usable. */
660 pmeg_reserve(SEGINV);
661 }
662
663 /*
664 * Reserve a pmeg (forever) for use by PROM, etc.
665 * Contents are left as-is. Called very early...
666 */
667 void
pmeg_reserve(int sme)
669 {
670 pmeg_t pmegp;
671
672 /* Can not use pmeg_p() because it fails on SEGINV. */
673 pmegp = &pmeg_array[sme];
674
675 if (pmegp->pmeg_reserved) {
676 prom_printf("pmeg_reserve: already reserved\n");
677 prom_abort();
678 }
679 if (pmegp->pmeg_owner) {
680 prom_printf("pmeg_reserve: already owned\n");
681 prom_abort();
682 }
683
684 /* Owned by kernel, but not really usable... */
685 pmegp->pmeg_owner = kernel_pmap;
686 pmegp->pmeg_reserved++; /* keep count, just in case */
687 TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
688 pmegp->pmeg_qstate = PMEGQ_NONE;
689 }
690
691 /*
692 * Examine PMEGs used by the monitor, and either
693 * reserve them (keep=1) or clear them (keep=0)
694 */
695 static void
pmeg_mon_init(vaddr_t sva, vaddr_t eva, int keep)
697 {
698 vaddr_t pgva, endseg;
699 int pte, valid;
700 unsigned char sme;
701
702 #ifdef PMAP_DEBUG
703 if (pmap_debug & PMD_SEGMAP)
704 prom_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n",
705 sva, eva, keep);
706 #endif
707
708 sva &= ~(NBSG - 1);
709
710 while (sva < eva) {
711 sme = get_segmap(sva);
712 if (sme != SEGINV) {
713 valid = 0;
714 endseg = sva + NBSG;
715 for (pgva = sva; pgva < endseg; pgva += PAGE_SIZE) {
716 pte = get_pte(pgva);
717 if (pte & PG_VALID) {
718 valid++;
719 }
720 }
721 #ifdef PMAP_DEBUG
722 if (pmap_debug & PMD_SEGMAP)
723 prom_printf(" sva=0x%x seg=0x%x valid=%d\n",
724 sva, sme, valid);
725 #endif
726 if (keep && valid)
727 pmeg_reserve(sme);
728 else
729 set_segmap(sva, SEGINV);
730 }
731 sva += NBSG;
732 }
733 }
734
735 /*
736 * This is used only during pmap_bootstrap, so we can
737 * get away with borrowing a slot in the segmap.
738 */
739 static void
pmeg_clean(pmeg_t pmegp)
741 {
742 int sme;
743 vaddr_t va;
744
745 sme = get_segmap(temp_seg_va);
746 if (sme != SEGINV)
747 panic("pmeg_clean");
748
749 sme = pmegp->pmeg_index;
750 set_segmap(temp_seg_va, sme);
751
752 for (va = 0; va < NBSG; va += PAGE_SIZE)
753 set_pte(temp_seg_va + va, PG_INVAL);
754
755 set_segmap(temp_seg_va, SEGINV);
756 }
757
758 /*
759 * This routine makes sure that pmegs on the pmeg_free_queue contain
760 * no valid ptes. It pulls things off the queue, cleans them, and
761 * puts them at the end. The ending condition is finding the first
762 * queue element at the head of the queue again.
763 */
764 static void
pmeg_clean_free(void)
766 {
767 pmeg_t pmegp, pmegp_first;
768
769 pmegp = TAILQ_FIRST(&pmeg_free_queue);
770 if (pmegp == NULL)
771 panic("pmap: no free pmegs available to clean");
772
773 pmegp_first = NULL;
774
775 for (;;) {
776 pmegp = TAILQ_FIRST(&pmeg_free_queue);
777 TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
778
779 pmegp->pmeg_qstate = PMEGQ_NONE;
780 pmeg_clean(pmegp);
781 pmegp->pmeg_qstate = PMEGQ_FREE;
782
783 TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
784
785 if (pmegp == pmegp_first)
786 break;
787 if (pmegp_first == NULL)
788 pmegp_first = pmegp;
789 }
790 }
791
792 /*
793 * Allocate a PMEG by whatever means necessary.
794 * (May invalidate some mappings!)
795 */
796 static pmeg_t
pmeg_allocate(pmap_t pmap, vaddr_t va)
798 {
799 pmeg_t pmegp;
800
801 CHECK_SPL();
802
803 #ifdef DIAGNOSTIC
804 if (va & SEGOFSET) {
805 panic("pmap:pmeg_allocate: va=0x%lx", va);
806 }
807 #endif
808
809 /* Get one onto the free list if necessary. */
810 pmegp = TAILQ_FIRST(&pmeg_free_queue);
811 if (!pmegp) {
812 /* Try inactive queue... */
813 pmegp = TAILQ_FIRST(&pmeg_inactive_queue);
814 if (!pmegp) {
815 /* Try active queue... */
816 pmegp = TAILQ_FIRST(&pmeg_active_queue);
817 }
818 if (!pmegp) {
819 panic("pmeg_allocate: failed");
820 }
821
822 /*
823 * Remove mappings to free-up a pmeg
824 * (so it will go onto the free list).
825 * XXX - Skip this one if it is wired?
826 */
827 pmap_remove1(pmegp->pmeg_owner,
828 pmegp->pmeg_va,
829 pmegp->pmeg_va + NBSG);
830 }
831
832 /* OK, free list has something for us to take. */
833 pmegp = TAILQ_FIRST(&pmeg_free_queue);
834 #ifdef DIAGNOSTIC
835 if (pmegp == NULL)
		panic("pmeg_allocate: still none free?");
837 if ((pmegp->pmeg_qstate != PMEGQ_FREE) ||
838 (pmegp->pmeg_index == SEGINV) ||
839 (pmegp->pmeg_vpages))
840 panic("pmeg_allocate: bad pmegp=%p", pmegp);
841 #endif
842 #ifdef PMAP_DEBUG
843 if (pmegp->pmeg_index == pmap_db_watchpmeg) {
844 db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp);
845 Debugger();
846 }
847 #endif
848
849 TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
850
851 /* Reassign this PMEG for the caller. */
852 pmegp->pmeg_owner = pmap;
853 pmegp->pmeg_version = pmap->pm_version;
854 pmegp->pmeg_va = va;
855 pmegp->pmeg_wired = 0;
856 pmegp->pmeg_reserved = 0;
857 pmegp->pmeg_vpages = 0;
858 if (pmap == kernel_pmap) {
859 TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
860 pmegp->pmeg_qstate = PMEGQ_KERNEL;
861 } else {
862 TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
863 pmegp->pmeg_qstate = PMEGQ_ACTIVE;
864 }
865 /* Caller will verify that it's empty (if debugging). */
866 return pmegp;
867 }
868
869 /*
870 * Put pmeg on the inactive queue, leaving its contents intact.
871 * This happens when we lose our context. We may reclaim
872 * this pmeg later if it is still in the inactive queue.
873 */
874 static void
pmeg_release(pmeg_t pmegp)
876 {
877
878 CHECK_SPL();
879
880 #ifdef DIAGNOSTIC
881 if ((pmegp->pmeg_owner == kernel_pmap) ||
882 (pmegp->pmeg_qstate != PMEGQ_ACTIVE))
883 panic("pmeg_release: bad pmeg=%p", pmegp);
884 #endif
885
886 TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
887 pmegp->pmeg_qstate = PMEGQ_INACTIVE;
888 TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
889 }
890
891 /*
892 * Move the pmeg to the free queue from wherever it is.
893 * The pmeg will be clean. It might be in kernel_pmap.
894 */
895 static void
pmeg_free(pmeg_t pmegp)
897 {
898
899 CHECK_SPL();
900
901 #ifdef DIAGNOSTIC
902 /* Caller should verify that it's empty. */
903 if (pmegp->pmeg_vpages != 0)
904 panic("pmeg_free: vpages");
905 #endif
906
907 switch (pmegp->pmeg_qstate) {
908 case PMEGQ_ACTIVE:
909 TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
910 break;
911 case PMEGQ_INACTIVE:
912 TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
913 break;
914 case PMEGQ_KERNEL:
915 TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
916 break;
917 default:
918 panic("pmeg_free: releasing bad pmeg");
919 break;
920 }
921
922 #ifdef PMAP_DEBUG
923 if (pmegp->pmeg_index == pmap_db_watchpmeg) {
924 db_printf("pmeg_free: watch pmeg 0x%x\n",
925 pmegp->pmeg_index);
926 Debugger();
927 }
928 #endif
929
930 pmegp->pmeg_owner = NULL;
931 pmegp->pmeg_qstate = PMEGQ_FREE;
932 TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
933 }
934
935 /*
936 * Find a PMEG that was put on the inactive queue when we
937 * had our context stolen. If found, move to active queue.
938 */
939 static pmeg_t
pmeg_cache(pmap_t pmap, vaddr_t va)
941 {
942 int sme, segnum;
943 pmeg_t pmegp;
944
945 CHECK_SPL();
946
947 #ifdef DIAGNOSTIC
948 if (pmap == kernel_pmap)
949 panic("pmeg_cache: kernel_pmap");
950 if (va & SEGOFSET) {
951 panic("pmap:pmeg_cache: va=0x%lx", va);
952 }
953 #endif
954
955 if (pmap->pm_segmap == NULL)
956 return PMEG_NULL;
957
958 segnum = VA_SEGNUM(va);
959 if (segnum > NUSEG) /* out of range */
960 return PMEG_NULL;
961
962 sme = pmap->pm_segmap[segnum];
963 if (sme == SEGINV) /* nothing cached */
964 return PMEG_NULL;
965
966 pmegp = pmeg_p(sme);
967
968 #ifdef PMAP_DEBUG
969 if (pmegp->pmeg_index == pmap_db_watchpmeg) {
970 db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
971 Debugger();
972 }
973 #endif
974
975 /*
976 * Our segmap named a PMEG. If it is no longer ours,
977 * invalidate that entry in our segmap and return NULL.
978 */
979 if ((pmegp->pmeg_owner != pmap) ||
980 (pmegp->pmeg_version != pmap->pm_version) ||
981 (pmegp->pmeg_va != va))
982 {
983 #ifdef PMAP_DEBUG
984 db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme);
985 pmeg_print(pmegp);
986 Debugger();
987 #endif
988 pmap->pm_segmap[segnum] = SEGINV;
989 return PMEG_NULL; /* cache lookup failed */
990 }
991
992 #ifdef DIAGNOSTIC
993 /* Make sure it is on the inactive queue. */
994 if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
995 panic("pmeg_cache: pmeg was taken: %p", pmegp);
996 #endif
997
998 TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
999 pmegp->pmeg_qstate = PMEGQ_ACTIVE;
1000 TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
1001
1002 return pmegp;
1003 }
1004
1005 #ifdef PMAP_DEBUG
1006 static void
pmeg_verify_empty(vaddr_t va)
1008 {
1009 vaddr_t eva;
1010 int pte;
1011
1012 for (eva = va + NBSG; va < eva; va += PAGE_SIZE) {
1013 pte = get_pte(va);
1014 if (pte & PG_VALID)
1015 panic("pmeg_verify_empty");
1016 }
1017 }
1018 #endif /* PMAP_DEBUG */
1019
1020
1021 /****************************************************************
 * Physical-to-virtual lookup support
1023 *
1024 * Need memory for the pv_alloc/pv_free list heads
1025 * and elements. We know how many to allocate since
1026 * there is one list head for each physical page, and
1027 * at most one element for each PMEG slot.
1028 */
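/*
 * Rough sizing sketch, assuming 4-byte pointers and a 12-byte
 * struct pv_entry: a machine with 4MB of RAM has physmem = 2048
 * (2KB) pages, so the flag table costs 2KB, the head table 8KB,
 * and the pv_entry pool NPMEG * NPAGSEG * 12 bytes, all rounded
 * up to whole pages by m68k_round_page().
 */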
1029 static void
pv_init(void)
1031 {
1032 int npp, nvp, sz;
1033 pv_entry_t pv;
1034 char *p;
1035
1036 /* total allocation size */
1037 sz = 0;
1038
1039 /*
1040 * Data for each physical page.
1041 * Each "mod/ref" flag is a char.
1042 * Each PV head is a pointer.
1043 * Note physmem is in pages.
1044 */
1045 npp = ALIGN(physmem);
1046 sz += (npp * sizeof(*pv_flags_tbl));
1047 sz += (npp * sizeof(*pv_head_tbl));
1048
1049 /*
1050 * Data for each virtual page (all PMEGs).
1051 * One pv_entry for each page frame.
1052 */
1053 nvp = NPMEG * NPAGSEG;
1054 sz += (nvp * sizeof(*pv_free_list));
1055
1056 /* Now allocate the whole thing. */
1057 sz = m68k_round_page(sz);
1058 p = (char *)uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED);
1059 if (p == NULL)
1060 panic("pmap:pv_init: alloc failed");
1061 memset(p, 0, sz);
1062
1063 /* Now divide up the space. */
1064 pv_flags_tbl = (void *) p;
1065 p += (npp * sizeof(*pv_flags_tbl));
1066 pv_head_tbl = (void*) p;
1067 p += (npp * sizeof(*pv_head_tbl));
1068 pv_free_list = (void *)p;
1069 p += (nvp * sizeof(*pv_free_list));
1070
1071 /* Finally, make pv_free_list into a list. */
1072 for (pv = pv_free_list; (char *)pv < p; pv++)
1073 pv->pv_next = &pv[1];
1074 pv[-1].pv_next = 0;
1075
1076 pv_initialized++;
1077 }
1078
1079 /*
1080 * Set or clear bits in all PTEs mapping a page.
1081 * Also does syncflags work while we are there...
1082 */
1083 static void
pv_changepte(paddr_t pa, int set_bits, int clear_bits)
1085 {
1086 pv_entry_t *head, pv;
1087 u_char *pv_flags;
1088 pmap_t pmap;
1089 vaddr_t va;
1090 int pte, sme;
1091 int saved_ctx;
1092 bool in_ctx;
1093 u_int flags;
1094
1095 pv_flags = pa_to_pvflags(pa);
1096 head = pa_to_pvhead(pa);
1097
1098 /* If no mappings, no work to do. */
1099 if (*head == NULL)
1100 return;
1101
1102 #ifdef DIAGNOSTIC
1103 /* This function should only clear these bits: */
1104 if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD))
1105 panic("pv_changepte: clear=0x%x", clear_bits);
1106 #endif
1107
1108 flags = 0;
1109 saved_ctx = get_context();
1110 for (pv = *head; pv != NULL; pv = pv->pv_next) {
1111 pmap = pv->pv_pmap;
1112 va = pv->pv_va;
1113
1114 #ifdef DIAGNOSTIC
1115 if (pmap->pm_segmap == NULL)
1116 panic("pv_changepte: null segmap");
1117 #endif
1118
1119 /* Is the PTE currently accessible in some context? */
1120 in_ctx = false;
1121 sme = SEGINV; /* kill warning */
1122 if (pmap == kernel_pmap) {
1123 set_context(KERNEL_CONTEXT);
1124 in_ctx = true;
1125 }
1126 else if (has_context(pmap)) {
1127 /* PMEG may be inactive. */
1128 set_context(pmap->pm_ctxnum);
1129 sme = get_segmap(va);
1130 if (sme != SEGINV)
1131 in_ctx = true;
1132 }
1133
1134 if (in_ctx == true) {
1135 /*
1136 * The PTE is in the current context.
1137 * Make sure PTE is up-to-date with VAC.
1138 */
1139 #ifdef HAVECACHE
1140 if (cache_size)
1141 cache_flush_page(va);
1142 #endif
1143 pte = get_pte(va);
1144 } else {
1145
1146 /*
1147 * The PTE is not in any context.
1148 */
1149
1150 sme = pmap->pm_segmap[VA_SEGNUM(va)];
1151 #ifdef DIAGNOSTIC
1152 if (sme == SEGINV)
1153 panic("pv_changepte: SEGINV");
1154 #endif
1155 pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
1156 }
1157
1158 #ifdef DIAGNOSTIC
1159 /* PV entries point only to valid mappings. */
1160 if ((pte & PG_VALID) == 0)
1161 panic("pv_changepte: not PG_VALID at va=0x%lx", va);
1162 #endif
1163 /* Get these while it's easy. */
1164 if (pte & PG_MODREF) {
1165 flags |= (pte & PG_MODREF);
1166 pte &= ~PG_MODREF;
1167 }
1168
1169 /* Finally, set and clear some bits. */
1170 pte |= set_bits;
1171 pte &= ~clear_bits;
1172
1173 if (in_ctx == true) {
1174 /* Did cache flush above. */
1175 set_pte(va, pte);
1176 } else {
1177 set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
1178 }
1179 }
1180 set_context(saved_ctx);
1181
1182 *pv_flags |= (flags >> PV_SHIFT);
1183 }
1184
1185 /*
1186 * Return ref and mod bits from pvlist,
1187 * and turns off same in hardware PTEs.
1188 */
1189 static u_int
pv_syncflags(pv_entry_t pv)
1191 {
1192 pmap_t pmap;
1193 vaddr_t va;
1194 int pte, sme;
1195 int saved_ctx;
1196 bool in_ctx;
1197 u_int flags;
1198
1199 /* If no mappings, no work to do. */
1200 if (pv == NULL)
1201 return (0);
1202
1203 flags = 0;
1204 saved_ctx = get_context();
1205 for (; pv != NULL; pv = pv->pv_next) {
1206 pmap = pv->pv_pmap;
1207 va = pv->pv_va;
1208 sme = SEGINV;
1209
1210 #ifdef DIAGNOSTIC
1211 /*
1212 * Only the head may have a null pmap, and
1213 * we checked for that above.
1214 */
1215 if (pmap->pm_segmap == NULL)
1216 panic("pv_syncflags: null segmap");
1217 #endif
1218
1219 /* Is the PTE currently accessible in some context? */
1220 in_ctx = false;
1221 if (pmap == kernel_pmap) {
1222 set_context(KERNEL_CONTEXT);
1223 in_ctx = true;
1224 }
1225 else if (has_context(pmap)) {
1226 /* PMEG may be inactive. */
1227 set_context(pmap->pm_ctxnum);
1228 sme = get_segmap(va);
1229 if (sme != SEGINV)
1230 in_ctx = true;
1231 }
1232
1233 if (in_ctx == true) {
1234
1235 /*
1236 * The PTE is in the current context.
1237 * Make sure PTE is up-to-date with VAC.
1238 */
1239
1240 #ifdef HAVECACHE
1241 if (cache_size)
1242 cache_flush_page(va);
1243 #endif
1244 pte = get_pte(va);
1245 } else {
1246
1247 /*
1248 * The PTE is not in any context.
1249 */
1250
1251 sme = pmap->pm_segmap[VA_SEGNUM(va)];
1252 #ifdef DIAGNOSTIC
1253 if (sme == SEGINV)
1254 panic("pv_syncflags: SEGINV");
1255 #endif
1256 pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
1257 }
1258
1259 #ifdef DIAGNOSTIC
1260 /* PV entries point only to valid mappings. */
1261 if ((pte & PG_VALID) == 0)
1262 panic("pv_syncflags: not PG_VALID at va=0x%lx", va);
1263 #endif
1264 /* OK, do what we came here for... */
1265 if (pte & PG_MODREF) {
1266 flags |= (pte & PG_MODREF);
1267 pte &= ~PG_MODREF;
1268 }
1269
1270 if (in_ctx == true) {
1271 /* Did cache flush above. */
1272 set_pte(va, pte);
1273 } else {
1274 set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
1275 }
1276 }
1277 set_context(saved_ctx);
1278
1279 return (flags >> PV_SHIFT);
1280 }
1281
1282 /* Remove all mappings for the physical page. */
1283 static void
pv_remove_all(paddr_t pa)
1285 {
1286 pv_entry_t *head, pv;
1287 pmap_t pmap;
1288 vaddr_t va;
1289
1290 CHECK_SPL();
1291
1292 #ifdef PMAP_DEBUG
1293 if (pmap_debug & PMD_REMOVE)
1294 printf("pv_remove_all(0x%lx)\n", pa);
1295 #endif
1296
1297 head = pa_to_pvhead(pa);
1298 while ((pv = *head) != NULL) {
1299 pmap = pv->pv_pmap;
1300 va = pv->pv_va;
1301 pmap_remove1(pmap, va, va + PAGE_SIZE);
1302 #ifdef PMAP_DEBUG
1303 /* Make sure it went away. */
1304 if (pv == *head) {
1305 db_printf("pv_remove_all: "
1306 "head unchanged for pa=0x%lx\n", pa);
1307 Debugger();
1308 }
1309 #endif
1310 }
1311 }
1312
1313 /*
1314 * The pmap system is asked to lookup all mappings that point to a
1315 * given physical memory address. This function adds a new element
1316 * to the list of mappings maintained for the given physical address.
1317 * Returns PV_NC if the (new) pvlist says that the address cannot
1318 * be cached.
1319 */
1320 static int
pv_link(pmap_t pmap, int pte, vaddr_t va)
1322 {
1323 paddr_t pa;
1324 pv_entry_t *head, pv;
1325 u_char *pv_flags;
1326
1327 if (!pv_initialized)
1328 return 0;
1329
1330 CHECK_SPL();
1331
1332 pa = PG_PA(pte);
1333
1334 #ifdef PMAP_DEBUG
1335 if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1336 printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1337 /* pv_print(pa); */
1338 }
1339 #endif
1340
1341 pv_flags = pa_to_pvflags(pa);
1342 head = pa_to_pvhead(pa);
1343
1344 #ifdef DIAGNOSTIC
1345 /* See if this mapping is already in the list. */
1346 for (pv = *head; pv != NULL; pv = pv->pv_next) {
1347 if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1348 panic("pv_link: duplicate entry for PA=0x%lx", pa);
1349 }
1350 #endif
1351
	/* Only the non-cached and mod/ref bits are of interest here. */
1353 int flags = (pte & (PG_NC | PG_MODREF)) >> PV_SHIFT;
1354 *pv_flags |= flags;
1355
1356 #ifdef HAVECACHE
1357 /*
1358 * Does this new mapping cause VAC alias problems?
1359 */
1360 if ((*pv_flags & PV_NC) == 0) {
1361 for (pv = *head; pv != NULL; pv = pv->pv_next) {
1362 if (BADALIAS(va, pv->pv_va)) {
1363 *pv_flags |= PV_NC;
1364 pv_changepte(pa, PG_NC, 0);
1365 pmap_stats.ps_vac_uncached++;
1366 break;
1367 }
1368 }
1369 }
1370 #endif
1371
1372 /* Allocate a PV element (pv_alloc()). */
1373 pv = pv_free_list;
1374 if (pv == NULL)
1375 panic("pv_link: pv_alloc");
1376 pv_free_list = pv->pv_next;
1377 pv->pv_next = 0;
1378
1379 /* Insert new entry at the head. */
1380 pv->pv_pmap = pmap;
1381 pv->pv_va = va;
1382 pv->pv_next = *head;
1383 *head = pv;
1384
1385 return (*pv_flags & PV_NC);
1386 }
1387
1388 /*
1389 * pv_unlink is a helper function for pmap_remove.
1390 * It removes the appropriate (pmap, pa, va) entry.
1391 *
1392 * Once the entry is removed, if the pv_table head has the cache
1393 * inhibit bit set, see if we can turn that off; if so, walk the
1394 * pvlist and turn off PG_NC in each PTE. (The pvlist is by
1395 * definition nonempty, since it must have at least two elements
1396 * in it to have PV_NC set, and we only remove one here.)
1397 */
1398 static void
pv_unlink(pmap_t pmap, int pte, vaddr_t va)
1400 {
1401 paddr_t pa;
1402 pv_entry_t *head, *ppv, pv;
1403 u_char *pv_flags;
1404
1405 CHECK_SPL();
1406
1407 pa = PG_PA(pte);
1408 #ifdef PMAP_DEBUG
1409 if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1410 printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1411 /* pv_print(pa); */
1412 }
1413 #endif
1414
1415 pv_flags = pa_to_pvflags(pa);
1416 head = pa_to_pvhead(pa);
1417
1418 /*
1419 * Find the entry.
1420 */
1421 ppv = head;
1422 pv = *ppv;
1423 while (pv) {
1424 if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1425 goto found;
1426 ppv = &pv->pv_next;
1427 pv = pv->pv_next;
1428 }
1429 #ifdef PMAP_DEBUG
1430 db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
1431 Debugger();
1432 #endif
1433 return;
1434
1435 found:
1436 /* Unlink this entry from the list and clear it. */
1437 *ppv = pv->pv_next;
1438 pv->pv_pmap = NULL;
1439 pv->pv_va = 0;
1440
1441 /* Insert it on the head of the free list. (pv_free()) */
1442 pv->pv_next = pv_free_list;
1443 pv_free_list = pv;
1444 pv = NULL;
1445
1446 /* Do any non-cached mappings remain? */
1447 if ((*pv_flags & PV_NC) == 0)
1448 return;
1449 if ((pv = *head) == NULL)
1450 return;
1451
1452 /*
1453 * Have non-cached mappings. See if we can fix that now.
1454 */
1455 va = pv->pv_va;
1456 for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
1457 /* If there is a DVMA mapping, leave it NC. */
1458 if (va >= DVMA_MAP_BASE)
1459 return;
1460 /* If there are VAC alias problems, leave NC. */
1461 if (BADALIAS(va, pv->pv_va))
1462 return;
1463 }
1464 /* OK, there are no "problem" mappings. */
1465 *pv_flags &= ~PV_NC;
1466 pv_changepte(pa, 0, PG_NC);
1467 pmap_stats.ps_vac_recached++;
1468 }
1469
1470
1471 /****************************************************************
1472 * Bootstrap and Initialization, etc.
1473 */
1474
1475 void
pmap_common_init(pmap_t pmap)
1477 {
1478 memset(pmap, 0, sizeof(struct pmap));
1479 pmap->pm_refcount = 1;
1480 pmap->pm_version = pmap_version++;
1481 pmap->pm_ctxnum = EMPTY_CONTEXT;
1482 }
1483
1484 /*
1485 * Prepare the kernel for VM operations.
1486 * This is called by locore2.c:_vm_init()
1487 * after the "start/end" globals are set.
1488 * This function must NOT leave context zero.
1489 */
1490 void
pmap_bootstrap(vaddr_t nextva)
1492 {
1493 vaddr_t va, eva;
1494 int i, pte, sme;
1495 extern char etext[];
1496
1497 nextva = m68k_round_page(nextva);
1498
1499 /* Steal some special-purpose, already mapped pages? */
1500
1501 /*
1502 * Determine the range of kernel virtual space available.
1503 * It is segment-aligned to simplify PMEG management.
1504 */
1505 virtual_avail = sun2_round_seg(nextva);
1506 virtual_end = VM_MAX_KERNEL_ADDRESS;
1507
1508 /*
1509 * Determine the range of physical memory available.
1510 */
1511 avail_start = nextva;
1512 avail_end = prom_memsize();
1513 avail_end = m68k_trunc_page(avail_end);
1514
1515 /*
1516 * Report the actual amount of physical memory,
1517 * even though the PROM takes a few pages.
1518 */
1519 physmem = (btoc(avail_end) + 0xF) & ~0xF;
1520
1521 /*
1522 * Done allocating PAGES of virtual space, so
1523 * clean out the rest of the last used segment.
1524 */
1525 for (va = nextva; va < virtual_avail; va += PAGE_SIZE)
1526 set_pte(va, PG_INVAL);
1527
1528 /*
1529 * Now that we are done stealing physical pages, etc.
1530 * figure out which PMEGs are used by those mappings
1531 * and either reserve them or clear them out.
1532 * -- but first, init PMEG management.
1533 * This puts all PMEGs in the free list.
	 * We will allocate the in-use ones.
1535 */
1536 pmeg_init();
1537
1538 /*
1539 * Reserve PMEGS for kernel text/data/bss
1540 * and the misc pages taken above.
1541 * VA range: [KERNBASE .. virtual_avail]
1542 */
1543 for (va = KERNBASE; va < virtual_avail; va += NBSG) {
1544 sme = get_segmap(va);
1545 if (sme == SEGINV) {
1546 prom_printf("kernel text/data/bss not mapped\n");
1547 prom_abort();
1548 }
1549 pmeg_reserve(sme);
1550 }
1551
1552 /*
1553 * Unmap kernel virtual space. Make sure to leave no valid
1554 * segmap entries in the MMU unless pmeg_array records them.
1555 * VA range: [vseg_avail .. virtual_end]
1556 */
1557 for ( ; va < virtual_end; va += NBSG)
1558 set_segmap(va, SEGINV);
1559
1560 /*
1561 * Reserve PMEGs used by the PROM monitor (device mappings).
1562 * Free up any pmegs in this range which have no mappings.
1563 * VA range: [0x00E00000 .. 0x00F00000]
1564 */
1565 pmeg_mon_init(SUN2_MONSTART, SUN2_MONEND, true);
1566
1567 /*
1568 * Unmap any pmegs left in DVMA space by the PROM.
1569 * DO NOT kill the last one! (owned by the PROM!)
1570 * VA range: [0x00F00000 .. 0x00FE0000]
1571 */
1572 pmeg_mon_init(SUN2_MONEND, SUN2_MONEND + DVMA_MAP_SIZE, false);
1573
1574 /*
1575 * Done reserving PMEGs and/or clearing out mappings.
1576 *
1577 * Now verify the mapping protections and such for the
1578 * important parts of the address space (in VA order).
1579 * Note that the Sun PROM usually leaves the memory
1580 * mapped with everything non-cached...
1581 */
1582
1583 /*
1584 * On a Sun2, the boot loader loads the kernel exactly where
1585 * it is linked, at physical/virtual 0x6000 (KERNBASE). This
1586 * means there are twelve physical/virtual pages before the
1587 * kernel text begins.
1588 */
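	/*
	 * Layout of those twelve pages, as set up below:
	 *   pages 0-3:   PROM pages, left mapped for now
	 *   pages 4-7:   msgbuf (PG_SYSTEM|PG_WRITE|PG_NC)
	 *   pages 8-9:   tmp_vpages (left invalid)
	 *   pages 10-11: temporary kernel stack from locore.s
	 */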
1589 va = 0;
1590
1591 /*
1592 * Physical/virtual pages zero through three are used by the
1593 * PROM. prom_init has already saved the PTEs, but we don't
1594 * want to unmap the pages until we've installed our own
1595 * vector table - just in case something happens before then
1596 * and we drop into the PROM.
1597 */
1598 eva = va + PAGE_SIZE * 4;
1599 va = eva;
1600
1601 /*
1602 * We use pages four through seven for the msgbuf.
1603 */
1604 eva = va + PAGE_SIZE * 4;
1605 for(; va < eva; va += PAGE_SIZE) {
1606 pte = get_pte(va);
1607 pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
1608 set_pte(va, pte);
1609 }
1610 /* Initialize msgbufaddr later, in machdep.c */
1611
1612 /*
1613 * On the Sun3, two of the three dead pages in SUN3_MONSHORTSEG
1614 * are used for tmp_vpages. The Sun2 doesn't have this
1615 * short-segment concept, so we reserve virtual pages eight
1616 * and nine for this.
1617 */
1618 set_pte(va, PG_INVAL);
1619 va += PAGE_SIZE;
1620 set_pte(va, PG_INVAL);
1621 va += PAGE_SIZE;
1622
1623 /*
1624 * Pages ten and eleven remain for the temporary kernel stack,
1625 * which is set up by locore.s. Hopefully this is enough space.
1626 */
1627 eva = va + PAGE_SIZE * 2;
1628 for(; va < eva ; va += PAGE_SIZE) {
1629 pte = get_pte(va);
1630 pte &= ~(PG_NC);
1631 pte |= (PG_SYSTEM | PG_WRITE);
1632 set_pte(va, pte);
1633 }
1634
1635 /*
1636 * Next is the kernel text.
1637 *
1638 * Verify protection bits on kernel text/data/bss
1639 * All of kernel text, data, and bss are cached.
1640 * Text is read-only (except in db_write_ktext).
1641 */
1642 eva = m68k_trunc_page(etext);
1643 while (va < eva) {
1644 pte = get_pte(va);
1645 if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1646 prom_printf("invalid page at 0x%x\n", va);
1647 }
1648 pte &= ~(PG_WRITE|PG_NC);
1649 /* Kernel text is read-only */
1650 pte |= (PG_SYSTEM);
1651 set_pte(va, pte);
1652 va += PAGE_SIZE;
1653 }
1654 /* data, bss, etc. */
1655 while (va < nextva) {
1656 pte = get_pte(va);
1657 if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1658 prom_printf("invalid page at 0x%x\n", va);
1659 }
1660 pte &= ~(PG_NC);
1661 pte |= (PG_SYSTEM | PG_WRITE);
1662 set_pte(va, pte);
1663 va += PAGE_SIZE;
1664 }
1665
1666 /*
1667 * Initialize all of the other contexts.
1668 */
1669 #ifdef DIAGNOSTIC
1670 /* Near the beginning of locore.s we set context zero. */
1671 if (get_context() != 0) {
1672 prom_printf("pmap_bootstrap: not in context zero?\n");
1673 prom_abort();
1674 }
1675 #endif /* DIAGNOSTIC */
1676 for (va = 0; va < (vaddr_t) (NBSG * NSEGMAP); va += NBSG) {
1677 for (i = 1; i < NCONTEXT; i++) {
1678 set_context(i);
1679 set_segmap(va, SEGINV);
1680 }
1681 }
1682 set_context(KERNEL_CONTEXT);
1683
1684 /*
1685 * Reserve a segment for the kernel to use to access a pmeg
1686 * that is not currently mapped into any context/segmap.
1687 * The kernel temporarily maps such a pmeg into this segment.
1688 */
1689 temp_seg_va = virtual_avail;
1690 virtual_avail += NBSG;
1691 #ifdef DIAGNOSTIC
1692 if (temp_seg_va & SEGOFSET) {
1693 prom_printf("pmap_bootstrap: temp_seg_va\n");
1694 prom_abort();
1695 }
1696 #endif
1697
1698 /* Initialization for pmap_next_page() */
1699 avail_next = avail_start;
1700
1701 uvmexp.pagesize = NBPG;
1702 uvm_md_init();
1703
1704 /* after setting up some structures */
1705
1706 pmap_common_init(kernel_pmap);
1707 pmap_kernel_init(kernel_pmap);
1708
1709 context_init();
1710
1711 pmeg_clean_free();
1712
1713 pmap_page_upload();
1714 }
1715
1716 /*
1717 * Give the kernel pmap a segmap, just so there are not
1718 * so many special cases required. Maybe faster too,
1719 * because this lets pmap_remove() and pmap_protect()
1720 * use a S/W copy of the segmap to avoid function calls.
1721 */
1722 void
pmap_kernel_init(pmap_t pmap)
1724 {
1725 vaddr_t va;
1726 int i, sme;
1727
1728 for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) {
1729 sme = get_segmap(va);
1730 kernel_segmap[i] = sme;
1731 }
1732 pmap->pm_segmap = kernel_segmap;
1733 }
1734
1735
1736 /****************************************************************
1737 * PMAP interface functions.
1738 */
1739
1740 /*
1741 * Support functions for vm_page_bootstrap().
1742 */
1743
1744 /*
1745 * How much virtual space does this kernel have?
1746 * (After mapping kernel text, data, etc.)
1747 */
1748 void
pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end)
1750 {
1751 *v_start = virtual_avail;
1752 *v_end = virtual_end;
1753 }
1754
1755 /* Provide memory to the VM system. */
1756 static void
pmap_page_upload(void)
1758 {
1759 int a, b, c, d;
1760
1761 if (hole_size) {
1762 /*
1763 * Supply the memory in two segments so the
1764 * reserved memory (3/50 video ram at 1MB)
1765 * can be carved from the front of the 2nd.
1766 */
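		/*
		 * In uvm_page_physload(start, end, avail_start,
		 * avail_end, ...) terms: the first segment [a,b) is
		 * entirely available, while the second spans [b,d)
		 * with only [c,d) free, so the hole [b,c) is
		 * registered but never handed out.
		 */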
1767 a = atop(avail_start);
1768 b = atop(hole_start);
1769 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
1770 c = atop(hole_start + hole_size);
1771 d = atop(avail_end);
1772 uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT);
1773 } else {
1774 a = atop(avail_start);
1775 d = atop(avail_end);
1776 uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT);
1777 }
1778 }
1779
1780 /*
1781 * Initialize the pmap module.
1782 * Called by vm_init, to initialize any structures that the pmap
1783 * system needs to map virtual memory.
1784 */
1785 void
pmap_init(void)
1787 {
1788 pv_init();
1789
1790 /* Initialize the pmap pool. */
1791 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1792 &pool_allocator_nointr, IPL_NONE);
1793 }
1794
1795 /*
1796 * Map a range of kernel virtual address space.
1797 * This might be used for device mappings, or to
1798 * record the mapping for kernel text/data/bss.
1799 * Return VA following the mapped range.
1800 */
1801 vaddr_t
pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
1803 {
1804 int sz;
1805
1806 sz = endpa - pa;
1807 do {
1808 pmap_enter(kernel_pmap, va, pa, prot, 0);
1809 va += PAGE_SIZE;
1810 pa += PAGE_SIZE;
1811 sz -= PAGE_SIZE;
1812 } while (sz > 0);
1813 pmap_update(kernel_pmap);
1814 return(va);
1815 }
1816
1817 void
pmap_user_init(pmap_t pmap)
1819 {
1820 int i;
1821 pmap->pm_segmap = kmem_alloc(sizeof(char)*NUSEG, KM_SLEEP);
1822 for (i = 0; i < NUSEG; i++) {
1823 pmap->pm_segmap[i] = SEGINV;
1824 }
1825 }
1826
1827 /*
1828 * Create and return a physical map.
1829 *
1830 * If the size specified for the map
1831 * is zero, the map is an actual physical
1832 * map, and may be referenced by the
1833 * hardware.
1834 *
1835 * If the size specified is non-zero,
1836 * the map will be used in software only, and
1837 * is bounded by that size.
1838 */
1839 pmap_t
pmap_create(void)
1841 {
1842 pmap_t pmap;
1843
1844 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1845 pmap_pinit(pmap);
1846 return pmap;
1847 }
1848
1849 /*
1850 * Release any resources held by the given physical map.
1851 * Called when a pmap initialized by pmap_pinit is being released.
1852 * Should only be called if the map contains no valid mappings.
1853 */
1854 void
pmap_release(struct pmap *pmap)
1856 {
1857 int s;
1858
1859 s = splvm();
1860
1861 if (pmap == kernel_pmap)
1862 panic("pmap_release: kernel_pmap!");
1863
1864 if (has_context(pmap)) {
1865 #ifdef PMAP_DEBUG
1866 if (pmap_debug & PMD_CONTEXT)
1867 printf("pmap_release(%p): free ctx %d\n",
1868 pmap, pmap->pm_ctxnum);
1869 #endif
1870 context_free(pmap);
1871 }
1872 kmem_free(pmap->pm_segmap, sizeof(char)*NUSEG);
1873 pmap->pm_segmap = NULL;
1874
1875 splx(s);
1876 }
1877
1878
1879 /*
1880 * Retire the given physical map from service.
1881 * Should only be called if the map contains
1882 * no valid mappings.
1883 */
1884 void
pmap_destroy(pmap_t pmap)
1886 {
1887 int count;
1888
1889 #ifdef PMAP_DEBUG
1890 if (pmap_debug & PMD_CREATE)
1891 printf("pmap_destroy(%p)\n", pmap);
1892 #endif
1893 if (pmap == kernel_pmap)
1894 panic("pmap_destroy: kernel_pmap!");
1895 count = atomic_dec_uint_nv(&pmap->pm_refcount);
1896 if (count == 0) {
1897 pmap_release(pmap);
1898 pool_put(&pmap_pmap_pool, pmap);
1899 }
1900 }
1901
1902 /*
1903 * Add a reference to the specified pmap.
1904 */
1905 void
pmap_reference(pmap_t pmap)
1907 {
1908
1909 atomic_inc_uint(&pmap->pm_refcount);
1910 }
1911
1912
1913 /*
1914 * Insert the given physical page (p) at
1915 * the specified virtual address (v) in the
1916 * target physical map with the protection requested.
1917 *
1918 * The physical address is page aligned, but may have some
1919 * low bits set indicating an OBIO or VME bus page, or just
1920 * that the non-cache bit should be set (i.e PMAP_NC).
1921 *
1922 * If specified, the page will be wired down, meaning
1923 * that the related pte can not be reclaimed.
1924 *
1925 * NB: This is the only routine which MAY NOT lazy-evaluate
1926 * or lose information. That is, this routine must actually
1927 * insert this page into the given map NOW.
1928 */
1929 int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1931 {
1932 int new_pte, s;
1933 bool wired = (flags & PMAP_WIRED) != 0;
1934
1935 #ifdef PMAP_DEBUG
1936 if ((pmap_debug & PMD_ENTER) ||
1937 (va == pmap_db_watchva))
1938 printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1939 pmap, va, pa, prot, wired);
1940 #endif
1941
1942 /* Get page-type bits from low part of the PA... */
1943 new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
1944
1945 /* ...now the valid and writable bits... */
1946 new_pte |= PG_VALID;
1947 if (prot & VM_PROT_WRITE)
1948 new_pte |= PG_WRITE;
1949 if (flags & VM_PROT_ALL) {
1950 new_pte |= PG_REF;
1951 if (flags & VM_PROT_WRITE) {
1952 new_pte |= PG_MOD;
1953 }
1954 }
1955
1956 /* ...and finally the page-frame number. */
1957 new_pte |= PA_PGNUM(pa);
1958
1959 /*
1960 * treatment varies significantly:
1961 * kernel ptes are always in the mmu
1962 * user ptes may not necessarily? be in the mmu. pmap may not
1963 * be in the mmu either.
1964 *
1965 */
1966 s = splvm();
1967 if (pmap == kernel_pmap) {
1968 new_pte |= PG_SYSTEM;
1969 pmap_enter_kernel(va, new_pte, wired);
1970 } else {
1971 pmap_enter_user(pmap, va, new_pte, wired);
1972 }
1973 splx(s);
1974 return 0;
1975 }
1976
1977 static void
pmap_enter_kernel(vaddr_t pgva, int new_pte, bool wired)
1979 {
1980 pmap_t pmap = kernel_pmap;
1981 pmeg_t pmegp;
1982 int do_pv, old_pte, sme;
1983 vaddr_t segva;
1984 int saved_ctx;
1985
1986 /*
1987 need to handle possibly allocating additional pmegs
1988 need to make sure they cant be stolen from the kernel;
1989 map any new pmegs into context zero, make sure rest of pmeg is null;
1990 deal with pv_stuff; possibly caching problems;
1991 must also deal with changes too.
1992 */
1993 saved_ctx = get_context();
1994 set_context(KERNEL_CONTEXT);
1995
1996 /*
1997 * In detail:
1998 *
1999 * (a) lock pmap
2000 * (b) Is the VA in a already mapped segment, if so
2001 * look to see if that VA address is "valid". If it is, then
2002 * action is a change to an existing pte
2003 * (c) if not mapped segment, need to allocate pmeg
2004 * (d) if adding pte entry or changing physaddr of existing one,
2005 * use pv_stuff, for change, pmap_remove() possibly.
2006 * (e) change/add pte
2007 */
2008
2009 #ifdef DIAGNOSTIC
2010 if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END))
2011 panic("pmap_enter_kernel: bad va=0x%lx", pgva);
2012 if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
2013 panic("pmap_enter_kernel: bad pte");
2014 #endif
2015
2016 if (pgva >= DVMA_MAP_BASE) {
2017 /* This is DVMA space. Always want it non-cached. */
2018 new_pte |= PG_NC;
2019 }
2020
2021 segva = sun2_trunc_seg(pgva);
2022 do_pv = true;
2023
2024 /* Do we have a PMEG? */
2025 sme = get_segmap(segva);
2026 if (sme != SEGINV) {
2027 /* Found a PMEG in the segmap. Cool. */
2028 pmegp = pmeg_p(sme);
2029 #ifdef DIAGNOSTIC
2030 /* Make sure it is the right PMEG. */
2031 if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2032 panic("pmap_enter_kernel: wrong sme at VA=0x%lx",
2033 segva);
2034 /* Make sure it is ours. */
2035 if (pmegp->pmeg_owner != pmap)
2036 panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme);
2037 #endif
2038 } else {
2039 /* No PMEG in the segmap. Have to allocate one. */
2040 pmegp = pmeg_allocate(pmap, segva);
2041 sme = pmegp->pmeg_index;
2042 pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2043 set_segmap(segva, sme);
2044 #ifdef PMAP_DEBUG
2045 pmeg_verify_empty(segva);
2046 if (pmap_debug & PMD_SEGMAP) {
2047 printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
2048 "(ek)\n", pmap, segva, sme);
2049 }
2050 #endif
2051 /* There are no existing mappings to deal with. */
2052 old_pte = 0;
2053 goto add_pte;
2054 }
2055
2056 /*
2057 * We have a PMEG. Is the VA already mapped to somewhere?
2058 * (a) if so, is it same pa? (really a protection change)
2059 * (b) if not same pa, then we have to unlink from old pa
2060 */
2061 old_pte = get_pte(pgva);
2062 if ((old_pte & PG_VALID) == 0)
2063 goto add_pte;
2064
2065 /* Have valid translation. Flush cache before changing it. */
2066 #ifdef HAVECACHE
2067 if (cache_size) {
2068 cache_flush_page(pgva);
2069 /* Get fresh mod/ref bits from write-back. */
2070 old_pte = get_pte(pgva);
2071 }
2072 #endif
2073
2074 /* XXX - removing valid page here, way lame... -glass */
2075 pmegp->pmeg_vpages--;
2076
2077 if (!IS_MAIN_MEM(old_pte)) {
2078 /* Was not main memory, so no pv_entry for it. */
2079 goto add_pte;
2080 }
2081
2082 /* Old mapping was main memory. Save mod/ref bits. */
2083 save_modref_bits(old_pte);
2084
2085 /*
2086 * If not changing the type or pfnum then re-use pv_entry.
2087 * Note we get here only with old_pte having PGT_OBMEM.
2088 */
2089 if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
2090 do_pv = false; /* re-use pv_entry */
2091 new_pte |= (old_pte & PG_NC);
2092 goto add_pte;
2093 }
2094
2095 /* OK, different type or PA, have to kill old pv_entry. */
2096 pv_unlink(pmap, old_pte, pgva);
2097
2098 add_pte: /* can be destructive */
2099 pmeg_set_wiring(pmegp, pgva, wired);
2100
2101 /* Anything but MAIN_MEM is mapped non-cached. */
2102 if (!IS_MAIN_MEM(new_pte)) {
2103 new_pte |= PG_NC;
2104 do_pv = false;
2105 }
2106 if (do_pv == true) {
2107 if (pv_link(pmap, new_pte, pgva) & PV_NC)
2108 new_pte |= PG_NC;
2109 }
2110 #ifdef PMAP_DEBUG
2111 if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2112 printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x "
2113 "(ek)\n", pmap, pgva, old_pte, new_pte);
2114 }
2115 #endif
2116 /* cache flush done above */
2117 set_pte(pgva, new_pte);
2118 set_context(saved_ctx);
2119 pmegp->pmeg_vpages++;
2120 }
2121
2122
2123 static void
2124 pmap_enter_user(pmap_t pmap, vaddr_t pgva, int new_pte, bool wired)
2125 {
2126 int do_pv, old_pte, sme;
2127 vaddr_t segva;
2128 pmeg_t pmegp;
2129
2130 #ifdef DIAGNOSTIC
2131 if (pgva >= VM_MAXUSER_ADDRESS)
2132 panic("pmap_enter_user: bad va=0x%lx", pgva);
2133 if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
2134 panic("pmap_enter_user: bad pte");
2135 #endif
2136 #ifdef PMAP_DEBUG
2137 /*
2138 * Some user pages are wired here, and a later
2139 * call to pmap_unwire() will unwire them.
2140 * XXX - Need a separate list for wired user pmegs
2141 * so they can not be stolen from the active list.
2142 * XXX - Note: vm_fault.c assumes pmap_extract will
2143 * work on wired mappings, so must preserve them...
2144 * XXX: Maybe keep a list of wired PMEGs?
2145 */
2146 if (wired && (pmap_debug & PMD_WIRING)) {
2147 db_printf("pmap_enter_user: attempt to wire user page, "
2148 "ignored\n");
2149 Debugger();
2150 }
2151 #endif
2152
2153 /* Validate this assumption. */
2154 if (pmap != current_pmap()) {
2155 #ifdef PMAP_DEBUG
2156 /* Apparently, this never happens. */
2157 db_printf("pmap_enter_user: not curlwp\n");
2158 Debugger();
2159 #endif
2160 /* Just throw it out (fault it in later). */
2161 /* XXX: But must remember it if wired... */
2162 return;
2163 }
2164
2165 segva = sun2_trunc_seg(pgva);
2166 do_pv = true;
2167
2168 /*
2169 * If this pmap was sharing the "empty" context,
2170 * allocate a real context for its exclusive use.
2171 */
2172 if (!has_context(pmap)) {
2173 context_allocate(pmap);
2174 #ifdef PMAP_DEBUG
2175 if (pmap_debug & PMD_CONTEXT)
2176 printf("pmap_enter(%p) got context %d\n",
2177 pmap, pmap->pm_ctxnum);
2178 #endif
2179 set_context(pmap->pm_ctxnum);
2180 } else {
2181 #ifdef PMAP_DEBUG
2182 /* Make sure context is correct. */
2183 if (pmap->pm_ctxnum != get_context()) {
2184 db_printf("pmap_enter_user: wrong context\n");
2185 Debugger();
2186 /* XXX: OK to proceed? */
2187 set_context(pmap->pm_ctxnum);
2188 }
2189 #endif
2190 }
2191
2192 /*
2193 * We have a context. Do we have a PMEG?
2194 */
2195 sme = get_segmap(segva);
2196 if (sme != SEGINV) {
2197 /* Found a PMEG in the segmap. Cool. */
2198 pmegp = pmeg_p(sme);
2199 #ifdef DIAGNOSTIC
2200 /* Make sure it is the right PMEG. */
2201 if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2202 panic("pmap_enter_user: wrong sme at VA=0x%lx", segva);
2203 /* Make sure it is ours. */
2204 if (pmegp->pmeg_owner != pmap)
2205 panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme);
2206 #endif
2207 } else {
2208 /* Not in the segmap. Try the S/W cache. */
2209 pmegp = pmeg_cache(pmap, segva);
2210 if (pmegp) {
2211 /* Found PMEG in cache. Just reload it. */
2212 sme = pmegp->pmeg_index;
2213 set_segmap(segva, sme);
2214 } else {
2215 /* PMEG not in cache, so allocate one. */
2216 pmegp = pmeg_allocate(pmap, segva);
2217 sme = pmegp->pmeg_index;
2218 pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2219 set_segmap(segva, sme);
2220 #ifdef PMAP_DEBUG
2221 pmeg_verify_empty(segva);
2222 #endif
2223 }
2224 #ifdef PMAP_DEBUG
2225 if (pmap_debug & PMD_SEGMAP) {
2226 printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
2227 "(eu)\n", pmap, segva, sme);
2228 }
2229 #endif
2230 }
2231
2232 /*
2233 * We have a PMEG. Is the VA already mapped to somewhere?
2234 * (a) if so, is it same pa? (really a protection change)
2235 * (b) if not same pa, then we have to unlink from old pa
2236 */
2237 old_pte = get_pte(pgva);
2238 if ((old_pte & PG_VALID) == 0)
2239 goto add_pte;
2240
2241 /* Have valid translation. Flush cache before changing it. */
2242 #ifdef HAVECACHE
2243 if (cache_size) {
2244 cache_flush_page(pgva);
2245 /* Get fresh mod/ref bits from write-back. */
2246 old_pte = get_pte(pgva);
2247 }
2248 #endif
2249
2250 /* XXX - removing valid page here, way lame... -glass */
2251 pmegp->pmeg_vpages--;
2252
2253 if (!IS_MAIN_MEM(old_pte)) {
2254 /* Was not main memory, so no pv_entry for it. */
2255 goto add_pte;
2256 }
2257
2258 /* Old mapping was main memory. Save mod/ref bits. */
2259 save_modref_bits(old_pte);
2260
2261 /*
2262 * If not changing the type or pfnum then re-use pv_entry.
2263 * Note we get here only with old_pte having PGT_OBMEM.
2264 */
2265 if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
2266 do_pv = false; /* re-use pv_entry */
2267 new_pte |= (old_pte & PG_NC);
2268 goto add_pte;
2269 }
2270
2271 /* OK, different type or PA, have to kill old pv_entry. */
2272 pv_unlink(pmap, old_pte, pgva);
2273
2274 add_pte:
2275 /* XXX - Wiring changes on user pmaps? */
2276 /* pmeg_set_wiring(pmegp, pgva, wired); */
2277
2278 /* Anything but MAIN_MEM is mapped non-cached. */
2279 if (!IS_MAIN_MEM(new_pte)) {
2280 new_pte |= PG_NC;
2281 do_pv = false;
2282 }
2283 if (do_pv == true) {
2284 if (pv_link(pmap, new_pte, pgva) & PV_NC)
2285 new_pte |= PG_NC;
2286 }
2287 #ifdef PMAP_DEBUG
2288 if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2289 printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x "
2290 "(eu)\n", pmap, pgva, old_pte, new_pte);
2291 }
2292 #endif
2293 /* cache flush done above */
2294 set_pte(pgva, new_pte);
2295 pmegp->pmeg_vpages++;
2296 }
2297
2298 void
2299 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2300 {
2301 int new_pte, s;
2302 pmap_t pmap = kernel_pmap;
2303 pmeg_t pmegp;
2304 int sme;
2305 vaddr_t segva;
2306 int saved_ctx;
2307
2308 #ifdef PMAP_DEBUG
2309 if ((pmap_debug & PMD_ENTER) ||
2310 (va == pmap_db_watchva))
2311 printf("pmap_kenter_pa(0x%lx, 0x%lx, 0x%x)\n",
2312 va, pa, prot);
2313 #endif
2314
2315 /* Get page-type bits from low part of the PA... */
2316 new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
2317
2318 /* ...now the valid and writable bits... */
2319 new_pte |= PG_SYSTEM|PG_VALID;
2320 if (prot & VM_PROT_WRITE)
2321 new_pte |= PG_WRITE;
2322
2323 /* ...and finally the page-frame number. */
2324 new_pte |= PA_PGNUM(pa);
2325
2326 /*
2327 * need to handle possibly allocating additional pmegs
2328 	 * need to make sure they can't be stolen from the kernel;
2329 * map any new pmegs into context zero, make sure rest of pmeg is null;
2330 * deal with pv_stuff; possibly caching problems;
2331 	 * must also deal with changes.
2332 */
2333 saved_ctx = get_context();
2334 set_context(KERNEL_CONTEXT);
2335
2336 /*
2337 * In detail:
2338 *
2339 * (a) lock pmap
2340 	 * (b) Is the VA in an already-mapped segment?  If so,
2341 * look to see if that VA address is "valid". If it is, then
2342 * action is a change to an existing pte
2343 * (c) if not mapped segment, need to allocate pmeg
2344 * (d) change/add pte
2345 */
2346
2347 #ifdef DIAGNOSTIC
2348 if ((va < virtual_avail) || (va >= DVMA_MAP_END))
2349 panic("pmap_kenter_pa: bad va=0x%lx", va);
2350 #endif
2351
2352 if (va >= DVMA_MAP_BASE) {
2353 /* This is DVMA space. Always want it non-cached. */
2354 new_pte |= PG_NC;
2355 }
2356
2357 segva = sun2_trunc_seg(va);
2358
2359 s = splvm();
2360
2361 /* Do we have a PMEG? */
2362 sme = get_segmap(segva);
2363 if (sme != SEGINV) {
2364 KASSERT((get_pte(va) & PG_VALID) == 0);
2365
2366 /* Found a PMEG in the segmap. Cool. */
2367 pmegp = pmeg_p(sme);
2368 #ifdef DIAGNOSTIC
2369 /* Make sure it is the right PMEG. */
2370 if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2371 panic("pmap_kenter_pa: wrong sme at VA=0x%lx", segva);
2372 /* Make sure it is ours. */
2373 if (pmegp->pmeg_owner != pmap)
2374 panic("pmap_kenter_pa: MMU has bad pmeg 0x%x", sme);
2375 #endif
2376 } else {
2377
2378 /* No PMEG in the segmap. Have to allocate one. */
2379 pmegp = pmeg_allocate(pmap, segva);
2380 sme = pmegp->pmeg_index;
2381 pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2382 set_segmap(segva, sme);
2383 #ifdef PMAP_DEBUG
2384 pmeg_verify_empty(segva);
2385 if (pmap_debug & PMD_SEGMAP) {
2386 printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
2387 "(ek)\n", pmap, segva, sme);
2388 }
2389 #endif
2390 }
2391
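	/* Mappings entered via pmap_kenter_pa() are always wired. */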
2392 pmeg_set_wiring(pmegp, va, true);
2393
2394 /* Anything but MAIN_MEM is mapped non-cached. */
2395 if (!IS_MAIN_MEM(new_pte)) {
2396 new_pte |= PG_NC;
2397 }
2398 #ifdef PMAP_DEBUG
2399 if ((pmap_debug & PMD_SETPTE) || (va == pmap_db_watchva)) {
2400 printf("pmap: set_pte pmap=%p va=0x%lx new=0x%x "
2401 "(ek)\n", pmap, va, new_pte);
2402 }
2403 #endif
2404 /* cache flush done above */
2405 set_pte(va, new_pte);
2406 set_context(saved_ctx);
2407 pmegp->pmeg_vpages++;
2408 splx(s);
2409 }
2410
2411 void
2412 pmap_kremove(vaddr_t va, vsize_t len)
2413 {
2414 pmap_t pmap = kernel_pmap;
2415 vaddr_t eva, neva, pgva, segva, segnum;
2416 int pte, sme;
2417 pmeg_t pmegp;
2418 #ifdef HAVECACHE
2419 int flush_by_page = 0;
2420 #endif
2421 int s;
2422 int saved_ctx;
2423
2424 s = splvm();
2425 saved_ctx = get_context();
2426 set_context(KERNEL_CONTEXT);
2427 segnum = VA_SEGNUM(va);
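	/*
	 * Walk the range one segment (NBSG bytes) at a time, using the
	 * software segmap to skip segments that have no PMEG loaded.
	 */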
2428 for (eva = va + len; va < eva; va = neva, segnum++) {
2429 neva = sun2_trunc_seg(va) + NBSG;
2430 if (neva > eva) {
2431 neva = eva;
2432 }
2433 if (pmap->pm_segmap[segnum] == SEGINV) {
2434 continue;
2435 }
2436
2437 segva = sun2_trunc_seg(va);
2438 sme = get_segmap(segva);
2439 pmegp = pmeg_p(sme);
2440
2441 #ifdef HAVECACHE
2442 if (cache_size) {
2443
2444 /*
2445 * If the range to be removed is larger than the cache,
2446 * it will be cheaper to flush this segment entirely.
2447 */
2448
2449 if (cache_size < (eva - va)) {
2450 /* cheaper to flush whole segment */
2451 cache_flush_segment(segva);
2452 } else {
2453 flush_by_page = 1;
2454 }
2455 }
2456 #endif
2457
2458 /* Invalidate the PTEs in the given range. */
2459 for (pgva = va; pgva < neva; pgva += PAGE_SIZE) {
2460 pte = get_pte(pgva);
2461 if (pte & PG_VALID) {
2462 #ifdef HAVECACHE
2463 if (flush_by_page) {
2464 cache_flush_page(pgva);
2465 /* Get fresh mod/ref bits
2466 from write-back. */
2467 pte = get_pte(pgva);
2468 }
2469 #endif
2470 #ifdef PMAP_DEBUG
2471 if ((pmap_debug & PMD_SETPTE) ||
2472 (pgva == pmap_db_watchva)) {
2473 printf("pmap: set_pte pmap=%p va=0x%lx"
2474 " old=0x%x new=0x%x (rrmmu)\n",
2475 pmap, pgva, pte, PG_INVAL);
2476 }
2477 #endif
2478 set_pte(pgva, PG_INVAL);
2479 KASSERT(pmegp->pmeg_vpages > 0);
2480 pmegp->pmeg_vpages--;
2481 }
2482 }
2483 KASSERT(pmegp->pmeg_vpages >= 0);
2484 if (pmegp->pmeg_vpages == 0) {
2485 /* We are done with this pmeg. */
2486 #ifdef PMAP_DEBUG
2487 if (is_pmeg_wired(pmegp)) {
2488 if (pmap_debug & PMD_WIRING) {
2489 db_printf("pmap: removing wired "
2490 "pmeg: %p\n", pmegp);
2491 Debugger();
2492 }
2493 }
2494 if (pmap_debug & PMD_SEGMAP) {
2495 printf("pmap: set_segmap ctx=%d v=0x%lx "
2496 "old=0x%x new=ff (rm)\n",
2497 pmap->pm_ctxnum, segva,
2498 pmegp->pmeg_index);
2499 }
2500 pmeg_verify_empty(segva);
2501 #endif
2502
2503 /* Remove it from the MMU. */
2504 set_segmap(segva, SEGINV);
2505 pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
2506
2507 /* Now, put it on the free list. */
2508 pmeg_free(pmegp);
2509 }
2510 }
2511 set_context(saved_ctx);
2512 splx(s);
2513 }
2514
2515
2516 /*
2517 * The trap handler calls this so we can try to resolve
2518 * user-level faults by reloading a PMEG.
2519 	 * If that does not produce a valid mapping,
2520 	 * call uvm_fault() as usual.
2521 *
2522 * XXX: Merge this with the next function?
2523 */
2524 int
2525 _pmap_fault(struct vm_map *map, vaddr_t va, vm_prot_t ftype)
2526 {
2527 pmap_t pmap;
2528 int rv;
2529
2530 pmap = vm_map_pmap(map);
2531 if (map == kernel_map) {
2532 /* Do not allow faults below the "managed" space. */
2533 if (va < virtual_avail) {
2534 /*
2535 * Most pages below virtual_avail are read-only,
2536 * so I will assume it is a protection failure.
2537 */
2538 return EACCES;
2539 }
2540 } else {
2541 /* User map. Try reload shortcut. */
2542 if (pmap_fault_reload(pmap, va, ftype))
2543 return 0;
2544 }
2545 rv = uvm_fault(map, va, ftype);
2546
2547 #ifdef PMAP_DEBUG
2548 if (pmap_debug & PMD_FAULT) {
2549 printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
2550 map, va, ftype, rv);
2551 }
2552 #endif
2553
2554 return (rv);
2555 }
2556
2557 /*
2558 * This is a shortcut used by the trap handler to
2559 * reload PMEGs into a user segmap without calling
2560 * the actual VM fault handler. Returns true if:
2561 * the PMEG was reloaded, and
2562 * it has a valid PTE at va.
2563 * Otherwise return zero and let VM code handle it.
2564 */
2565 int
2566 pmap_fault_reload(pmap_t pmap, vaddr_t pgva, vm_prot_t ftype)
2567 {
2568 int rv, s, pte, chkpte, sme;
2569 vaddr_t segva;
2570 pmeg_t pmegp;
2571
2572 if (pgva >= VM_MAXUSER_ADDRESS)
2573 return (0);
2574 if (pmap->pm_segmap == NULL) {
2575 #ifdef PMAP_DEBUG
2576 db_printf("pmap_fault_reload: null segmap\n");
2577 Debugger();
2578 #endif
2579 return (0);
2580 }
2581
2582 /* Short-cut using the S/W segmap. */
2583 if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV)
2584 return (0);
2585
2586 segva = sun2_trunc_seg(pgva);
2587 chkpte = PG_VALID;
2588 if (ftype & VM_PROT_WRITE)
2589 chkpte |= PG_WRITE;
2590 rv = 0;
2591
2592 s = splvm();
2593
2594 /*
2595 * Given that we faulted on a user-space address, we will
2596 * probably need a context. Get a context now so we can
2597 * try to resolve the fault with a segmap reload.
2598 */
2599 if (!has_context(pmap)) {
2600 context_allocate(pmap);
2601 #ifdef PMAP_DEBUG
2602 if (pmap_debug & PMD_CONTEXT)
2603 printf("pmap_fault(%p) got context %d\n",
2604 pmap, pmap->pm_ctxnum);
2605 #endif
2606 set_context(pmap->pm_ctxnum);
2607 } else {
2608 #ifdef PMAP_DEBUG
2609 /* Make sure context is correct. */
2610 if (pmap->pm_ctxnum != get_context()) {
2611 db_printf("pmap_fault_reload: wrong context\n");
2612 Debugger();
2613 /* XXX: OK to proceed? */
2614 set_context(pmap->pm_ctxnum);
2615 }
2616 #endif
2617 }
2618
2619 sme = get_segmap(segva);
2620 if (sme == SEGINV) {
2621 /* See if there is something to reload. */
2622 pmegp = pmeg_cache(pmap, segva);
2623 if (pmegp) {
2624 /* Found one! OK, reload it. */
2625 pmap_stats.ps_pmeg_faultin++;
2626 sme = pmegp->pmeg_index;
2627 set_segmap(segva, sme);
2628 pte = get_pte(pgva);
2629 if (pte & chkpte)
2630 rv = 1;
2631 }
2632 }
2633
2634 splx(s);
2635 return (rv);
2636 }
2637
2638
2639 /*
2640 * Clear the modify bit for the given physical page.
2641 */
2642 bool
2643 pmap_clear_modify(struct vm_page *pg)
2644 {
2645 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2646 pv_entry_t *head;
2647 u_char *pv_flags;
2648 int s;
2649 bool rv;
2650
2651 pv_flags = pa_to_pvflags(pa);
2652 head = pa_to_pvhead(pa);
2653
2654 s = splvm();
2655 *pv_flags |= pv_syncflags(*head);
2656 rv = *pv_flags & PV_MOD;
2657 *pv_flags &= ~PV_MOD;
2658 splx(s);
2659 return rv;
2660 }
2661
2662 /*
2663 * Tell whether the given physical page has been modified.
2664 */
2665 bool
2666 pmap_is_modified(struct vm_page *pg)
2667 {
2668 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2669 pv_entry_t *head;
2670 u_char *pv_flags;
2671 int s;
2672 bool rv;
2673
2674 pv_flags = pa_to_pvflags(pa);
2675 head = pa_to_pvhead(pa);
2676
2677 s = splvm();
2678 if ((*pv_flags & PV_MOD) == 0)
2679 *pv_flags |= pv_syncflags(*head);
2680 rv = (*pv_flags & PV_MOD);
2681 splx(s);
2682 return (rv);
2683 }
2684
2685 /*
2686 * Clear the reference bit for the given physical page.
2687 * It's OK to just remove mappings if that's easier.
2688 */
2689 bool
2690 pmap_clear_reference(struct vm_page *pg)
2691 {
2692 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2693 pv_entry_t *head;
2694 u_char *pv_flags;
2695 int s;
2696 bool rv;
2697
2698 pv_flags = pa_to_pvflags(pa);
2699 head = pa_to_pvhead(pa);
2700
2701 s = splvm();
2702 *pv_flags |= pv_syncflags(*head);
2703 rv = *pv_flags & PV_REF;
2704 *pv_flags &= ~PV_REF;
2705 splx(s);
2706 return rv;
2707 }
2708
2709 /*
2710 * Tell whether the given physical page has been referenced.
2711 * It's OK to just return false if page is not mapped.
2712 */
2713 bool
2714 pmap_is_referenced(struct vm_page *pg)
2715 {
2716 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2717 pv_entry_t *head;
2718 u_char *pv_flags;
2719 int s;
2720 bool rv;
2721
2722 pv_flags = pa_to_pvflags(pa);
2723 head = pa_to_pvhead(pa);
2724
2725 s = splvm();
2726 if ((*pv_flags & PV_REF) == 0)
2727 *pv_flags |= pv_syncflags(*head);
2728 rv = (*pv_flags & PV_REF);
2729 splx(s);
2730 return (rv);
2731 }
2732
2733
2734 /*
2735 * This is called by locore.s:cpu_switch() when it is
2736 * switching to a new process. Load new translations.
2737 */
2738 void
2739 _pmap_switch(pmap_t pmap)
2740 {
2741
2742 /*
2743 * Since we maintain completely separate user and kernel address
2744 * spaces, whenever we switch to a process, we need to make sure
2745 * that it has a context allocated.
2746 */
2747 if (!has_context(pmap)) {
2748 context_allocate(pmap);
2749 #ifdef PMAP_DEBUG
2750 if (pmap_debug & PMD_CONTEXT)
2751 printf("_pmap_switch(%p) got context %d\n",
2752 pmap, pmap->pm_ctxnum);
2753 #endif
2754 }
2755 set_context(pmap->pm_ctxnum);
2756 }
2757
2758 /*
2759 * Exported version of pmap_activate(). This is called from the
2760 * machine-independent VM code when a process is given a new pmap.
2761 	 * If (l->l_proc == curproc), do what cpu_switch() would do; otherwise
2762 	 * just take this as notification that the process has a new pmap.
2763 */
2764 void
2765 pmap_activate(struct lwp *l)
2766 {
2767 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2768
2769 if (l->l_proc == curproc) {
2770 _pmap_switch(pmap);
2771 }
2772 }
2773
2774 /*
2775 * Deactivate the address space of the specified process.
2776 */
2777 void
2778 pmap_deactivate(struct lwp *l)
2779 {
2780 /* Nothing to do. */
2781 }
2782
2783 /*
2784 * Routine: pmap_unwire
2785 * Function: Clear the wired attribute for a map/virtual-address
2786 * pair.
2787 * In/out conditions:
2788 * The mapping must already exist in the pmap.
2789 */
2790 void
2791 pmap_unwire(pmap_t pmap, vaddr_t va)
2792 {
2793 int s, sme;
2794 int wiremask, ptenum;
2795 pmeg_t pmegp;
2796 int saved_ctx;
2797
2798 #ifdef PMAP_DEBUG
2799 if (pmap_debug & PMD_WIRING)
2800 printf("pmap_unwire(pmap=%p, va=0x%lx)\n",
2801 pmap, va);
2802 #endif
2803 /*
2804 * We are asked to unwire pages that were wired when
2805 * pmap_enter() was called and we ignored wiring.
2806 * (VM code appears to wire a stack page during fork.)
2807 */
2808 if (pmap != kernel_pmap) {
2809 #ifdef PMAP_DEBUG
2810 if (pmap_debug & PMD_WIRING) {
2811 db_printf(" (user pmap -- ignored)\n");
2812 Debugger();
2813 }
2814 #endif
2815 return;
2816 }
2817
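	/*
	 * pmeg_wired is a bitmask with one bit per page in the
	 * segment (16 pages per PMEG); clear this page's bit.
	 */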
2818 ptenum = VA_PTE_NUM(va);
2819 wiremask = 1 << ptenum;
2820
2821 s = splvm();
2822 saved_ctx = get_context();
2823 set_context(KERNEL_CONTEXT);
2824 sme = get_segmap(va);
2825 set_context(saved_ctx);
2826 pmegp = pmeg_p(sme);
2827 pmegp->pmeg_wired &= ~wiremask;
2828 splx(s);
2829 }
2830
2831 /*
2832 * Copy the range specified by src_addr/len
2833 * from the source map to the range dst_addr/len
2834 * in the destination map.
2835 *
2836 * This routine is only advisory and need not do anything.
2837 */
2838 void
2839 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
2840 vaddr_t src_addr)
2841 {
2842 }
2843
2844 /*
2845 * This extracts the PMEG associated with the given map/virtual
2846 * address pair. Returns SEGINV if VA not valid.
2847 */
2848 int
2849 _pmap_extract_pmeg(pmap_t pmap, vaddr_t va)
2850 {
2851 int s, saved_ctx, segnum, sme;
2852
2853 s = splvm();
2854
2855 if (pmap == kernel_pmap) {
2856 saved_ctx = get_context();
2857 set_context(KERNEL_CONTEXT);
2858 sme = get_segmap(va);
2859 set_context(saved_ctx);
2860 } else {
2861 /* This is rare, so do it the easy way. */
2862 segnum = VA_SEGNUM(va);
2863 sme = pmap->pm_segmap[segnum];
2864 }
2865
2866 splx(s);
2867 return (sme);
2868 }
2869
2870 /*
2871 * Routine: pmap_extract
2872 * Function:
2873 * Extract the physical page address associated
2874 * with the given map/virtual_address pair.
2875 	 * Returns false if VA not valid.
2876 */
2877 bool
2878 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
2879 {
2880 int s, sme, segnum, ptenum, pte;
2881 paddr_t pa;
2882 int saved_ctx;
2883
2884 pte = 0;
2885 s = splvm();
2886 if (pmap == kernel_pmap) {
2887 saved_ctx = get_context();
2888 set_context(KERNEL_CONTEXT);
2889 sme = get_segmap(va);
2890 if (sme != SEGINV)
2891 pte = get_pte(va);
2892 set_context(saved_ctx);
2893 } else {
2894 /* This is rare, so do it the easy way. */
2895 segnum = VA_SEGNUM(va);
2896 sme = pmap->pm_segmap[segnum];
2897 if (sme != SEGINV) {
2898 ptenum = VA_PTE_NUM(va);
2899 pte = get_pte_pmeg(sme, ptenum);
2900 }
2901 }
2902 splx(s);
2903
2904 if ((pte & PG_VALID) == 0) {
2905 #ifdef PMAP_DEBUG
2906 db_printf("pmap_extract: invalid va=0x%lx\n", va);
2907 Debugger();
2908 #endif
2909 return (false);
2910 }
2911 pa = PG_PA(pte);
2912 #ifdef DIAGNOSTIC
2913 if (pte & PG_TYPE) {
2914 panic("pmap_extract: not main mem, va=0x%lx", va);
2915 }
2916 #endif
2917 if (pap != NULL)
2918 *pap = pa;
2919 return (true);
2920 }
2921
2922
2923 /*
2924 * pmap_page_protect:
2925 *
2926 * Lower the permission for all mappings to a given page.
2927 */
2928 void
2929 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2930 {
2931 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2932 int s;
2933
2934 s = splvm();
2935 #ifdef PMAP_DEBUG
2936 if (pmap_debug & PMD_PROTECT)
2937 printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
2938 #endif
2939 switch (prot) {
2940 case VM_PROT_ALL:
2941 break;
2942 case VM_PROT_READ:
2943 case VM_PROT_READ|VM_PROT_EXECUTE:
2944 pv_changepte(pa, 0, PG_WRITE);
2945 break;
2946 default:
2947 /* remove mapping for all pmaps that have it */
2948 pv_remove_all(pa);
2949 break;
2950 }
2951 splx(s);
2952 }
2953
2954 /*
2955 * Initialize a preallocated and zeroed pmap structure,
2956 * such as one in a vmspace structure.
2957 */
2958 void
2959 pmap_pinit(pmap_t pmap)
2960 {
2961 pmap_common_init(pmap);
2962 pmap_user_init(pmap);
2963 }
2964
2965 /*
2966 * Reduce the permissions on the specified
2967 * range of this map as requested.
2968 * (Make pages read-only.)
2969 */
2970 void
2971 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
2972 {
2973 vaddr_t va, neva;
2974 int segnum;
2975
2976 /* If leaving writable, nothing to do. */
2977 if (prot & VM_PROT_WRITE)
2978 return;
2979
2980 /* If removing all permissions, just unmap. */
2981 if ((prot & VM_PROT_READ) == 0) {
2982 pmap_remove(pmap, sva, eva);
2983 return;
2984 }
2985
2986 #ifdef PMAP_DEBUG
2987 if ((pmap_debug & PMD_PROTECT) ||
2988 ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
2989 printf("pmap_protect(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
2990 #endif
2991
2992 KASSERT((pmap == kernel_pmap) ?
2993 sva >= virtual_avail && eva < DVMA_MAP_END :
2994 eva <= VM_MAXUSER_ADDRESS);
2995 va = sva;
2996 segnum = VA_SEGNUM(va);
2997 while (va < eva) {
2998 neva = sun2_trunc_seg(va) + NBSG;
2999 if (neva > eva)
3000 neva = eva;
3001 if (pmap->pm_segmap[segnum] != SEGINV)
3002 pmap_protect1(pmap, va, neva);
3003 va = neva;
3004 segnum++;
3005 }
3006 }
3007
3008 /*
3009 * Remove write permissions in given range.
3010 * (guaranteed to be within one segment)
3011 * similar to pmap_remove1()
3012 */
3013 void
3014 pmap_protect1(pmap_t pmap, vaddr_t sva, vaddr_t eva)
3015 {
3016 int old_ctx, s, sme;
3017 bool in_ctx;
3018
3019 s = splvm();
3020
3021 #ifdef DIAGNOSTIC
3022 if (sun2_trunc_seg(sva) != sun2_trunc_seg(eva-1))
3023 panic("pmap_protect1: bad range!");
3024 #endif
3025
3026 if (pmap == kernel_pmap) {
3027 old_ctx = get_context();
3028 set_context(KERNEL_CONTEXT);
3029 sme = get_segmap(sva);
3030 if (sme != SEGINV)
3031 pmap_protect_mmu(pmap, sva, eva);
3032 set_context(old_ctx);
3033 goto out;
3034 }
3035 /* It is a user pmap. */
3036
3037 /* There is a PMEG, but maybe not active. */
3038 old_ctx = INVALID_CONTEXT;
3039 in_ctx = false;
3040 if (has_context(pmap)) {
3041 /* Temporary context change. */
3042 old_ctx = get_context();
3043 set_context(pmap->pm_ctxnum);
3044 sme = get_segmap(sva);
3045 if (sme != SEGINV)
3046 in_ctx = true;
3047 }
3048
3049 if (in_ctx == true)
3050 pmap_protect_mmu(pmap, sva, eva);
3051 else
3052 pmap_protect_noctx(pmap, sva, eva);
3053
3054 if (old_ctx != INVALID_CONTEXT) {
3055 /* Restore previous context. */
3056 set_context(old_ctx);
3057 }
3058
3059 out:
3060 splx(s);
3061 }
3062
3063 /*
3064 * Remove write permissions, all in one PMEG,
3065 * where that PMEG is currently in the MMU.
3066 * The current context is already correct.
3067 */
3068 void
3069 pmap_protect_mmu(pmap_t pmap, vaddr_t sva, vaddr_t eva)
3070 {
3071 vaddr_t pgva, segva __diagused;
3072 int pte;
3073 #ifdef HAVECACHE
3074 int flush_by_page = 0;
3075 #endif
3076
3077 CHECK_SPL();
3078
3079 #ifdef DIAGNOSTIC
3080 if (pmap->pm_ctxnum != get_context())
3081 panic("pmap_protect_mmu: wrong context");
3082 #endif
3083
3084 segva = sun2_trunc_seg(sva);
3085
3086 #ifdef DIAGNOSTIC
3087 int sme = get_segmap(segva);
3088 /* Make sure it is valid and known. */
3089 if (sme == SEGINV)
3090 panic("pmap_protect_mmu: SEGINV");
3091 if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
3092 panic("pmap_protect_mmu: incorrect sme, va=0x%lx", segva);
3093 #endif
3094
3095
3096 #ifdef DIAGNOSTIC
3097 /* have pmeg, will travel */
3098 pmeg_t pmegp = pmeg_p(sme);
3099 /* Make sure we own the pmeg, right va, etc. */
3100 if ((pmegp->pmeg_va != segva) ||
3101 (pmegp->pmeg_owner != pmap) ||
3102 (pmegp->pmeg_version != pmap->pm_version))
3103 {
3104 panic("pmap_protect_mmu: bad pmeg=%p", pmegp);
3105 }
3106 if (pmegp->pmeg_vpages < 0)
3107 panic("pmap_protect_mmu: npages corrupted");
3108 if (pmegp->pmeg_vpages == 0)
3109 panic("pmap_protect_mmu: no valid pages?");
3110 #endif
3111
3112 #ifdef HAVECACHE
3113 if (cache_size) {
3114 /*
3115 * If the range to be removed is larger than the cache,
3116 * it will be cheaper to flush this segment entirely.
3117 */
3118 if (cache_size < (eva - sva)) {
3119 /* cheaper to flush whole segment */
3120 cache_flush_segment(segva);
3121 } else {
3122 flush_by_page = 1;
3123 }
3124 }
3125 #endif
3126
3127 /* Remove write permission in the given range. */
3128 for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
3129 pte = get_pte(pgva);
3130 if (pte & PG_VALID) {
3131 #ifdef HAVECACHE
3132 if (flush_by_page) {
3133 cache_flush_page(pgva);
3134 /* Get fresh mod/ref bits from write-back. */
3135 pte = get_pte(pgva);
3136 }
3137 #endif
3138 if (IS_MAIN_MEM(pte)) {
3139 save_modref_bits(pte);
3140 }
3141 pte &= ~(PG_WRITE | PG_MODREF);
3142 set_pte(pgva, pte);
3143 }
3144 }
3145 }
3146
3147 /*
3148 * Remove write permissions, all in one PMEG,
3149 * where it is not currently in any context.
3150 */
3151 void
3152 pmap_protect_noctx(pmap_t pmap, vaddr_t sva, vaddr_t eva)
3153 {
3154 int old_ctx, pte, sme, segnum;
3155 vaddr_t pgva, segva;
3156
3157 #ifdef DIAGNOSTIC
3158 /* Kernel always in a context (actually, in context zero). */
3159 if (pmap == kernel_pmap)
3160 panic("pmap_protect_noctx: kernel_pmap");
3161 if (pmap->pm_segmap == NULL)
3162 panic("pmap_protect_noctx: null segmap");
3163 #endif
3164
3165 segva = sun2_trunc_seg(sva);
3166 segnum = VA_SEGNUM(segva);
3167 sme = pmap->pm_segmap[segnum];
3168 if (sme == SEGINV)
3169 return;
3170
3171 /*
3172 * Switch to the kernel context so we can access the PMEG
3173 * using the temporary segment.
3174 */
3175 old_ctx = get_context();
3176 set_context(KERNEL_CONTEXT);
3177 #ifdef DIAGNOSTIC
3178 if (temp_seg_inuse)
3179 panic("pmap_protect_noctx: temp_seg_inuse");
3180 temp_seg_inuse++;
3181 #endif
3182 set_segmap(temp_seg_va, sme);
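	/*
	 * Rebase the VA range into the temporary segment window so the
	 * loop below walks the PTEs of the borrowed PMEG.
	 */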
3183 sva += (temp_seg_va - segva);
3184 eva += (temp_seg_va - segva);
3185
3186 /* Remove write permission in the given range. */
3187 for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
3188 pte = get_pte(pgva);
3189 if (pte & PG_VALID) {
3190 /* No cache flush needed. */
3191 if (IS_MAIN_MEM(pte)) {
3192 save_modref_bits(pte);
3193 }
3194 pte &= ~(PG_WRITE | PG_MODREF);
3195 set_pte(pgva, pte);
3196 }
3197 }
3198
3199 /*
3200 * Release the temporary segment, and
3201 * restore the previous context.
3202 */
3203 set_segmap(temp_seg_va, SEGINV);
3204 #ifdef DIAGNOSTIC
3205 temp_seg_inuse--;
3206 #endif
3207 set_context(old_ctx);
3208 }
3209
3210
3211 /*
3212 * Remove the given range of addresses from the specified map.
3213 *
3214 * It is assumed that the start and end are properly
3215 * rounded to the page size.
3216 */
3217 void
3218 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
3219 {
3220 vaddr_t va, neva;
3221 int segnum;
3222
3223 #ifdef PMAP_DEBUG
3224 if ((pmap_debug & PMD_REMOVE) ||
3225 ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
3226 printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
3227 #endif
3228
3229
3230 KASSERT((pmap == kernel_pmap) ?
3231 sva >= virtual_avail && eva < DVMA_MAP_END :
3232 eva <= VM_MAXUSER_ADDRESS);
3233 va = sva;
3234 segnum = VA_SEGNUM(va);
3235 while (va < eva) {
3236 neva = sun2_trunc_seg(va) + NBSG;
3237 if (neva > eva)
3238 neva = eva;
3239 if (pmap->pm_segmap[segnum] != SEGINV)
3240 pmap_remove1(pmap, va, neva);
3241 va = neva;
3242 segnum++;
3243 }
3244 }
3245
3246 /*
3247 * Remove user mappings, all within one segment
3248 */
3249 void
3250 pmap_remove1(pmap_t pmap, vaddr_t sva, vaddr_t eva)
3251 {
3252 int old_ctx, s, sme;
3253 bool in_ctx;
3254
3255 s = splvm();
3256
3257 #ifdef DIAGNOSTIC
3258 if (sun2_trunc_seg(sva) != sun2_trunc_seg(eva-1))
3259 panic("pmap_remove1: bad range!");
3260 #endif
3261
3262 if (pmap == kernel_pmap) {
3263 old_ctx = get_context();
3264 set_context(KERNEL_CONTEXT);
3265 sme = get_segmap(sva);
3266 if (sme != SEGINV)
3267 pmap_remove_mmu(pmap, sva, eva);
3268 set_context(old_ctx);
3269 goto out;
3270 }
3271 /* It is a user pmap. */
3272
3273 /* There is a PMEG, but maybe not active. */
3274 old_ctx = INVALID_CONTEXT;
3275 in_ctx = false;
3276 if (has_context(pmap)) {
3277 /* Temporary context change. */
3278 old_ctx = get_context();
3279 set_context(pmap->pm_ctxnum);
3280 sme = get_segmap(sva);
3281 if (sme != SEGINV)
3282 in_ctx = true;
3283 }
3284
3285 if (in_ctx == true)
3286 pmap_remove_mmu(pmap, sva, eva);
3287 else
3288 pmap_remove_noctx(pmap, sva, eva);
3289
3290 if (old_ctx != INVALID_CONTEXT) {
3291 /* Restore previous context. */
3292 set_context(old_ctx);
3293 }
3294
3295 out:
3296 splx(s);
3297 }
3298
3299 /*
3300 * Remove some mappings, all in one PMEG,
3301 * where that PMEG is currently in the MMU.
3302 * The current context is already correct.
3303 * If no PTEs remain valid in the PMEG, free it.
3304 */
3305 void
3306 pmap_remove_mmu(pmap_t pmap, vaddr_t sva, vaddr_t eva)
3307 {
3308 pmeg_t pmegp;
3309 vaddr_t pgva, segva;
3310 int pte, sme;
3311 #ifdef HAVECACHE
3312 int flush_by_page = 0;
3313 #endif
3314
3315 CHECK_SPL();
3316
3317 #ifdef DIAGNOSTIC
3318 if (pmap->pm_ctxnum != get_context())
3319 panic("pmap_remove_mmu: wrong context");
3320 #endif
3321
3322 segva = sun2_trunc_seg(sva);
3323 sme = get_segmap(segva);
3324
3325 #ifdef DIAGNOSTIC
3326 /* Make sure it is valid and known. */
3327 if (sme == SEGINV)
3328 panic("pmap_remove_mmu: SEGINV");
3329 if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
3330 panic("pmap_remove_mmu: incorrect sme, va=0x%lx", segva);
3331 #endif
3332
3333 pmegp = pmeg_p(sme);
3334 /* have pmeg, will travel */
3335
3336 #ifdef DIAGNOSTIC
3337 /* Make sure we own the pmeg, right va, etc. */
3338 if ((pmegp->pmeg_va != segva) ||
3339 (pmegp->pmeg_owner != pmap) ||
3340 (pmegp->pmeg_version != pmap->pm_version))
3341 {
3342 panic("pmap_remove_mmu: bad pmeg=%p", pmegp);
3343 }
3344 if (pmegp->pmeg_vpages < 0)
3345 panic("pmap_remove_mmu: npages corrupted");
3346 if (pmegp->pmeg_vpages == 0)
3347 panic("pmap_remove_mmu: no valid pages?");
3348 #endif
3349
3350 #ifdef HAVECACHE
3351 if (cache_size) {
3352 /*
3353 * If the range to be removed is larger than the cache,
3354 * it will be cheaper to flush this segment entirely.
3355 */
3356 if (cache_size < (eva - sva)) {
3357 /* cheaper to flush whole segment */
3358 cache_flush_segment(segva);
3359 } else {
3360 flush_by_page = 1;
3361 }
3362 }
3363 #endif
3364
3365 /* Invalidate the PTEs in the given range. */
3366 for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
3367 pte = get_pte(pgva);
3368 if (pte & PG_VALID) {
3369 #ifdef HAVECACHE
3370 if (flush_by_page) {
3371 cache_flush_page(pgva);
3372 /* Get fresh mod/ref bits from write-back. */
3373 pte = get_pte(pgva);
3374 }
3375 #endif
3376 if (IS_MAIN_MEM(pte)) {
3377 save_modref_bits(pte);
3378 pv_unlink(pmap, pte, pgva);
3379 }
3380 #ifdef PMAP_DEBUG
3381 if ((pmap_debug & PMD_SETPTE) ||
3382 (pgva == pmap_db_watchva)) {
3383 printf("pmap: set_pte pmap=%p va=0x%lx"
3384 " old=0x%x new=0x%x (rrmmu)\n",
3385 pmap, pgva, pte, PG_INVAL);
3386 }
3387 #endif
3388 set_pte(pgva, PG_INVAL);
3389 KASSERT(pmegp->pmeg_vpages > 0);
3390 pmegp->pmeg_vpages--;
3391 }
3392 }
3393
3394 KASSERT(pmegp->pmeg_vpages >= 0);
3395 if (pmegp->pmeg_vpages == 0) {
3396 /* We are done with this pmeg. */
3397 if (is_pmeg_wired(pmegp)) {
3398 #ifdef PMAP_DEBUG
3399 if (pmap_debug & PMD_WIRING) {
3400 db_printf("pmap: removing wired pmeg: %p\n",
3401 pmegp);
3402 Debugger();
3403 }
3404 #endif /* PMAP_DEBUG */
3405 }
3406
3407 #ifdef PMAP_DEBUG
3408 if (pmap_debug & PMD_SEGMAP) {
3409 printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x "
3410 "new=ff (rm)\n",
3411 pmap->pm_ctxnum, segva, pmegp->pmeg_index);
3412 }
3413 pmeg_verify_empty(segva);
3414 #endif
3415
3416 /* Remove it from the MMU. */
3417 if (kernel_pmap == pmap) {
3418 /* Did cache flush above. */
3419 set_segmap(segva, SEGINV);
3420 } else {
3421 /* Did cache flush above. */
3422 set_segmap(segva, SEGINV);
3423 }
3424 pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
3425 /* Now, put it on the free list. */
3426 pmeg_free(pmegp);
3427 }
3428 }
3429
3430 /*
3431 * Remove some mappings, all in one PMEG,
3432 * where it is not currently in any context.
3433 */
3434 void
3435 pmap_remove_noctx(pmap_t pmap, vaddr_t sva, vaddr_t eva)
3436 {
3437 pmeg_t pmegp;
3438 int old_ctx, pte, sme, segnum;
3439 vaddr_t pgva, segva;
3440
3441 CHECK_SPL();
3442
3443 #ifdef DIAGNOSTIC
3444 /* Kernel always in a context (actually, in context zero). */
3445 if (pmap == kernel_pmap)
3446 panic("pmap_remove_noctx: kernel_pmap");
3447 if (pmap->pm_segmap == NULL)
3448 panic("pmap_remove_noctx: null segmap");
3449 #endif
3450
3451 segva = sun2_trunc_seg(sva);
3452 segnum = VA_SEGNUM(segva);
3453 sme = pmap->pm_segmap[segnum];
3454 if (sme == SEGINV)
3455 return;
3456 pmegp = pmeg_p(sme);
3457
3458 /*
3459 * Switch to the kernel context so we can access the PMEG
3460 * using the temporary segment.
3461 */
3462 old_ctx = get_context();
3463 set_context(KERNEL_CONTEXT);
3464 #ifdef DIAGNOSTIC
3465 if (temp_seg_inuse)
3466 panic("pmap_remove_noctx: temp_seg_inuse");
3467 temp_seg_inuse++;
3468 #endif
3469 set_segmap(temp_seg_va, sme);
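	/*
	 * Rebase the VA range into the temporary segment window; note
	 * that pv_unlink() below converts back to the original VA.
	 */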
3470 sva += (temp_seg_va - segva);
3471 eva += (temp_seg_va - segva);
3472
3473 /* Invalidate the PTEs in the given range. */
3474 for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
3475 pte = get_pte(pgva);
3476 if (pte & PG_VALID) {
3477 /* No cache flush needed. */
3478 if (IS_MAIN_MEM(pte)) {
3479 save_modref_bits(pte);
3480 pv_unlink(pmap, pte,
3481 pgva - (temp_seg_va - segva));
3482 }
3483 #ifdef PMAP_DEBUG
3484 if ((pmap_debug & PMD_SETPTE) ||
3485 (pgva == pmap_db_watchva)) {
3486 printf("pmap: set_pte pmap=%p va=0x%lx"
3487 " old=0x%x new=0x%x (rrncx)\n",
3488 pmap, pgva, pte, PG_INVAL);
3489 }
3490 #endif
3491 set_pte(pgva, PG_INVAL);
3492 KASSERT(pmegp->pmeg_vpages > 0);
3493 pmegp->pmeg_vpages--;
3494 }
3495 }
3496
3497 /*
3498 * Release the temporary segment, and
3499 * restore the previous context.
3500 */
3501 set_segmap(temp_seg_va, SEGINV);
3502 #ifdef DIAGNOSTIC
3503 temp_seg_inuse--;
3504 #endif
3505 set_context(old_ctx);
3506
3507 KASSERT(pmegp->pmeg_vpages >= 0);
3508 if (pmegp->pmeg_vpages == 0) {
3509 /* We are done with this pmeg. */
3510 if (is_pmeg_wired(pmegp)) {
3511 #ifdef PMAP_DEBUG
3512 if (pmap_debug & PMD_WIRING) {
3513 db_printf("pmap: removing wired pmeg: %p\n",
3514 pmegp);
3515 Debugger();
3516 }
3517 #endif /* PMAP_DEBUG */
3518 }
3519
3520 pmap->pm_segmap[segnum] = SEGINV;
3521 pmeg_free(pmegp);
3522 }
3523 }
3524
3525
3526 /*
3527 * Count resident pages in this pmap.
3528 * See: kern_sysctl.c:pmap_resident_count
3529 */
3530 segsz_t
3531 pmap_resident_pages(pmap_t pmap)
3532 {
3533 int i, sme, pages;
3534 pmeg_t pmeg;
3535
3536 if (pmap->pm_segmap == 0)
3537 return (0);
3538
3539 pages = 0;
3540 for (i = 0; i < NUSEG; i++) {
3541 sme = pmap->pm_segmap[i];
3542 if (sme != SEGINV) {
3543 pmeg = pmeg_p(sme);
3544 pages += pmeg->pmeg_vpages;
3545 }
3546 }
3547 return (pages);
3548 }
3549
3550 /*
3551 * Count wired pages in this pmap.
3552 * See vm_mmap.c:pmap_wired_count
3553 */
3554 segsz_t
3555 pmap_wired_pages(pmap_t pmap)
3556 {
3557 int i, mask, sme, pages;
3558 pmeg_t pmeg;
3559
3560 if (pmap->pm_segmap == 0)
3561 return (0);
3562
3563 pages = 0;
3564 for (i = 0; i < NUSEG; i++) {
3565 sme = pmap->pm_segmap[i];
3566 if (sme != SEGINV) {
3567 pmeg = pmeg_p(sme);
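			/* Count the set bits in the 16-bit per-page wired mask. */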
3568 mask = 0x8000;
3569 do {
3570 if (pmeg->pmeg_wired & mask)
3571 pages++;
3572 mask = (mask >> 1);
3573 } while (mask);
3574 }
3575 }
3576 return (pages);
3577 }
3578
3579
3580 /*
3581 * pmap_copy_page copies the specified (machine independent)
3582 * page by mapping the page into virtual memory and using
3583 	 * copypage() to copy the page, one machine dependent page at a
3584 * time.
3585 */
3586 void
3587 pmap_copy_page(paddr_t src, paddr_t dst)
3588 {
3589 int pte;
3590 int s;
3591 int saved_ctx;
3592
3593 s = splvm();
3594
3595 #ifdef PMAP_DEBUG
3596 if (pmap_debug & PMD_COW)
3597 printf("pmap_copy_page: 0x%lx -> 0x%lx\n", src, dst);
3598 #endif
3599
3600 /*
3601 * Temporarily switch to the kernel context to use the
3602 * tmp_vpages.
3603 */
3604 saved_ctx = get_context();
3605 set_context(KERNEL_CONTEXT);
3606 #ifdef DIAGNOSTIC
3607 if (tmp_vpages_inuse)
3608 panic("pmap_copy_page: vpages inuse");
3609 tmp_vpages_inuse++;
3610 #endif
3611
3612 /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
3613 	/* All mappings to tmp_vpages are non-cached, so no flush. */
3614 pte = PG_PERM | PA_PGNUM(src);
3615 set_pte(tmp_vpages[0], pte);
3616 pte = PG_PERM | PA_PGNUM(dst);
3617 set_pte(tmp_vpages[1], pte);
3618 copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
3619 set_pte(tmp_vpages[0], PG_INVAL);
3620 set_pte(tmp_vpages[1], PG_INVAL);
3621
3622 #ifdef DIAGNOSTIC
3623 tmp_vpages_inuse--;
3624 #endif
3625 set_context(saved_ctx);
3626
3627 splx(s);
3628 }
3629
3630 /*
3631 * pmap_zero_page zeros the specified (machine independent)
3632 * page by mapping the page into virtual memory and using
3633 	 * zeropage() to clear its contents, one machine dependent page
3634 * at a time.
3635 */
3636 void
3637 pmap_zero_page(paddr_t pa)
3638 {
3639 int pte;
3640 int s;
3641 int saved_ctx;
3642
3643 s = splvm();
3644
3645 #ifdef PMAP_DEBUG
3646 if (pmap_debug & PMD_COW)
3647 printf("pmap_zero_page: 0x%lx\n", pa);
3648 #endif
3649
3650 /*
3651 * Temporarily switch to the kernel context to use the
3652 * tmp_vpages.
3653 */
3654 saved_ctx = get_context();
3655 set_context(KERNEL_CONTEXT);
3656 #ifdef DIAGNOSTIC
3657 if (tmp_vpages_inuse)
3658 panic("pmap_zero_page: vpages inuse");
3659 tmp_vpages_inuse++;
3660 #endif
3661
3662 /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
3663 	/* All mappings to tmp_vpages are non-cached, so no flush. */
3664 pte = PG_PERM | PA_PGNUM(pa);
3665 set_pte(tmp_vpages[0], pte);
3666 zeropage((char *) tmp_vpages[0]);
3667 set_pte(tmp_vpages[0], PG_INVAL);
3668
3669 #ifdef DIAGNOSTIC
3670 tmp_vpages_inuse--;
3671 #endif
3672 set_context(saved_ctx);
3673
3674 splx(s);
3675 }
3676
3677 /*
3678 * Find first virtual address >= *va that is
3679 * least likely to cause cache aliases.
3680 * (This will just seg-align mappings.)
3681 */
3682 void
3683 pmap_prefer(vaddr_t fo, vaddr_t *va, int td)
3684 {
3685 long d;
3686
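	/*
	 * Advance *va so that it has the same offset within a segment
	 * as fo; i.e. seg-align the mapping as described above.
	 */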
3687 d = fo - *va;
3688 d &= SEGOFSET;
3689 if (d == 0) {
3690 return;
3691 }
3692 if (td) {
3693 *va -= SEGOFSET + 1;
3694 }
3695 *va += d;
3696 }
3697
3698 /*
3699 * Fill in the sun2-specific part of the kernel core header
3700 * for dumpsys(). (See machdep.c for the rest.)
3701 */
3702 void
3703 pmap_kcore_hdr(struct sun2_kcore_hdr *sh)
3704 {
3705 vaddr_t va;
3706 u_char *cp, *ep;
3707 int saved_ctx;
3708
3709 sh->segshift = SEGSHIFT;
3710 sh->pg_frame = PG_FRAME;
3711 sh->pg_valid = PG_VALID;
3712
3713 /* Copy the kernel segmap (256 bytes). */
3714 va = KERNBASE;
3715 cp = sh->ksegmap;
3716 ep = cp + sizeof(sh->ksegmap);
3717 saved_ctx = get_context();
3718 set_context(KERNEL_CONTEXT);
3719 do {
3720 *cp = get_segmap(va);
3721 va += NBSG;
3722 cp++;
3723 } while (cp < ep);
3724 set_context(saved_ctx);
3725 }
3726
3727 /*
3728 * Copy the pagemap RAM into the passed buffer (one page)
3729 * starting at OFF in the pagemap RAM.
3730 */
3731 void
3732 pmap_get_pagemap(int *pt, int off)
3733 {
3734 vaddr_t va, va_end;
3735 int sme, sme_end; /* SegMap Entry numbers */
3736 int saved_ctx;
3737
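	/*
	 * 'off' is a byte offset into the pagemap image; each PMEG
	 * contributes NPAGSEG PTE entries of sizeof(*pt) bytes, so
	 * convert the offset into a range of PMEG numbers to copy.
	 */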
3738 sme = (off / (NPAGSEG * sizeof(*pt))); /* PMEG to start on */
3739 sme_end =
3740 sme + (PAGE_SIZE / (NPAGSEG * sizeof(*pt))); /* where to stop */
3741 va_end = temp_seg_va + NBSG;
3742
3743 saved_ctx = get_context();
3744 set_context(KERNEL_CONTEXT);
3745 do {
3746 set_segmap(temp_seg_va, sme);
3747 va = temp_seg_va;
3748 do {
3749 *pt++ = get_pte(va);
3750 va += PAGE_SIZE;
3751 } while (va < va_end);
3752 sme++;
3753 } while (sme < sme_end);
3754 set_segmap(temp_seg_va, SEGINV);
3755 set_context(saved_ctx);
3756 }
3757
3758
3759 /*
3760 * Helper functions for changing unloaded PMEGs
3761 */
3762
3763 static int
3764 get_pte_pmeg(int pmeg_num, int page_num)
3765 {
3766 vaddr_t va;
3767 int pte;
3768 int saved_ctx;
3769
3770 CHECK_SPL();
3771 saved_ctx = get_context();
3772 set_context(KERNEL_CONTEXT);
3773 #ifdef DIAGNOSTIC
3774 if (temp_seg_inuse)
3775 panic("get_pte_pmeg: temp_seg_inuse");
3776 temp_seg_inuse++;
3777 #endif
3778
3779 va = temp_seg_va;
3780 set_segmap(temp_seg_va, pmeg_num);
3781 va += PAGE_SIZE*page_num;
3782 pte = get_pte(va);
3783 set_segmap(temp_seg_va, SEGINV);
3784
3785 #ifdef DIAGNOSTIC
3786 temp_seg_inuse--;
3787 #endif
3788 set_context(saved_ctx);
3789 return pte;
3790 }
3791
3792 static void
3793 set_pte_pmeg(int pmeg_num, int page_num, int pte)
3794 {
3795 vaddr_t va;
3796 int saved_ctx;
3797
3798 CHECK_SPL();
3799 saved_ctx = get_context();
3800 set_context(KERNEL_CONTEXT);
3801 #ifdef DIAGNOSTIC
3802 if (temp_seg_inuse)
3803 panic("set_pte_pmeg: temp_seg_inuse");
3804 temp_seg_inuse++;
3805 #endif
3806
3807 /* We never access data in temp_seg_va so no need to flush. */
3808 va = temp_seg_va;
3809 set_segmap(temp_seg_va, pmeg_num);
3810 va += PAGE_SIZE*page_num;
3811 set_pte(va, pte);
3812 set_segmap(temp_seg_va, SEGINV);
3813
3814 #ifdef DIAGNOSTIC
3815 temp_seg_inuse--;
3816 #endif
3817 set_context(saved_ctx);
3818 }
3819
3820 /*
3821 * Routine: pmap_procwr
3822 *
3823 * Function:
3824 * Synchronize caches corresponding to [addr, addr+len) in p.
3825 */
3826 void
3827 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
3828 {
3829 }
3830
3831
3832 #ifdef PMAP_DEBUG
3833 /* Things to call from the debugger. */
3834
3835 void
3836 pmap_print(pmap_t pmap)
3837 {
3838 db_printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
3839 db_printf(" pm_version=0x%x\n", pmap->pm_version);
3840 db_printf(" pm_segmap=%p\n", pmap->pm_segmap);
3841 }
3842
3843 void
3844 pmeg_print(pmeg_t pmegp)
3845 {
3846 db_printf("link_next=%p link_prev=%p\n",
3847 TAILQ_NEXT(pmegp, pmeg_link),
3848 TAILQ_PREV(pmegp, pmeg_tailq, pmeg_link));
3849 db_printf("index=0x%x owner=%p own_vers=0x%x\n",
3850 pmegp->pmeg_index, pmegp->pmeg_owner, pmegp->pmeg_version);
3851 db_printf("va=0x%lx wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",
3852 pmegp->pmeg_va, pmegp->pmeg_wired,
3853 pmegp->pmeg_reserved, pmegp->pmeg_vpages,
3854 pmegp->pmeg_qstate);
3855 }
3856
3857 void
3858 pv_print(paddr_t pa)
3859 {
3860 pv_entry_t pv;
3861 int idx;
3862
3863 idx = PA_PGNUM(pa);
3864 if (idx >= physmem) {
3865 db_printf("bad address\n");
3866 return;
3867 }
3868 db_printf("pa=0x%lx, flags=0x%x\n",
3869 pa, pv_flags_tbl[idx]);
3870
3871 pv = pv_head_tbl[idx];
3872 while (pv) {
3873 db_printf(" pv_entry %p pmap %p va 0x%lx next %p\n",
3874 pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
3875 pv = pv->pv_next;
3876 }
3877 }
3878 #endif /* PMAP_DEBUG */
3879