1 /* $NetBSD: pmap.c,v 1.117 2022/03/20 18:56:29 andvar Exp $ */
2
3 /*-
4 * Copyright (c) 2011 Reinoud Zandijk <reinoud@NetBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.117 2022/03/20 18:56:29 andvar Exp $");
31
32 #include "opt_memsize.h"
33 #include "opt_kmempages.h"
34 #include "opt_misc.h"
35
36 #include <sys/types.h>
37 #include <sys/param.h>
38 #include <sys/mutex.h>
39 #include <sys/buf.h>
40 #include <sys/kmem.h>
41 #include <sys/malloc.h>
42 #include <sys/pool.h>
43 #include <machine/thunk.h>
44 #include <machine/machdep.h>
45 #include <machine/pcb.h>
46
47 #include <uvm/uvm.h>
48
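/*
 * A pv_entry describes one mapping of a physical page: the owning pmap,
 * the physical (ppn) and logical (lpn) page numbers, the protection that
 * was requested and the protection actually programmed into the host
 * mmap() (pv_mmap_ppl).  The entry for a page's first mapping lives
 * directly in pv_table[ppn]; further mappings of the same physical page
 * are chained off it through pv_next.  The per-physical-page flags
 * (pv_pflags) are only maintained in the head entry.
 */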
49 struct pv_entry {
50 struct pv_entry *pv_next;
51 pmap_t pv_pmap;
52 uintptr_t pv_ppn; /* physical page number */
53 uintptr_t pv_lpn; /* logical page number */
54 vm_prot_t pv_prot; /* logical protection */
55 uint8_t pv_mmap_ppl; /* programmed protection */
56 uint8_t pv_vflags; /* per mapping flags */
57 #define PV_WIRED 0x01 /* wired mapping */
58 #define PV_UNMANAGED 0x02 /* entered by pmap_kenter_ */
59 uint8_t pv_pflags; /* per phys page flags */
60 #define PV_REFERENCED 0x01
61 #define PV_MODIFIED 0x02
62 };
63
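/*
 * A pmap's address space is described by a two-level table: pm_l1 is an
 * array of pm_nl1 pointers to pmap_l2 pages, each holding PMAP_L2_NENTRY
 * pv_entry pointers.  A logical page number is thus looked up as
 * pm_l1[lpn / PMAP_L2_NENTRY]->pm_l2[lpn % PMAP_L2_NENTRY]; see
 * pmap_set_pv() and pmap_lookup_pv().
 */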
64 #define PMAP_L2_SIZE PAGE_SIZE
65 #define PMAP_L2_NENTRY (PMAP_L2_SIZE / sizeof(struct pv_entry *))
66
67 struct pmap_l2 {
68 struct pv_entry *pm_l2[PMAP_L2_NENTRY];
69 };
70
71 struct pmap {
72 int pm_count;
73 int pm_flags;
74 #define PM_ACTIVE 0x01
75 struct pmap_statistics pm_stats;
76 struct pmap_l2 **pm_l1;
77 };
78
79 /*
80 * pv_table is a list of pv_entry structs completely spanning the total memory.
81 * It is indexed by physical page number. Each entry is daisy-chained
82 * with pv_entry records for each usage in all the pmaps.
83 *
84 * kernel_pm_entries contains all kernel L2 pages for its complete map.
85 *
86 */
87
88 static struct pv_entry **kernel_pm_entries;
89 static struct pv_entry *pv_table; /* physical pages info (direct mapped) */
90 static struct pv_entry **tlb; /* current tlb mappings (direct mapped) */
91 static struct pmap pmap_kernel_store;
92 struct pmap * const kernel_pmap_ptr = &pmap_kernel_store;
93
94 static pmap_t active_pmap = NULL;
95
96 static char mem_name[20] = "";
97 static int mem_fh;
98
99 static int phys_npages = 0;
100 static int pm_nentries = 0;
101 static int pm_nl1 = 0;
102 static int pm_l1_size = 0;
103 static uint64_t pm_entries_size = 0;
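/*
 * scratch kvm addresses where pmap_zero_page() and pmap_copy_page()
 * temporarily map physical pages
 */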
104 static void *pm_tmp_p0;
105 static void *pm_tmp_p1;
106
107 static struct pool pmap_pool;
108 static struct pool pmap_pventry_pool;
109
110 /* forwards */
111 void pmap_bootstrap(void);
112 static void pmap_page_activate(struct pv_entry *pv);
113 static void pmap_page_deactivate(struct pv_entry *pv);
114 static void pv_update(struct pv_entry *pv);
115 static void pmap_update_page(uintptr_t ppn);
116 bool pmap_fault(pmap_t pmap, vaddr_t va, vm_prot_t *atype);
117
118 static struct pv_entry *pv_get(pmap_t pmap, uintptr_t ppn, uintptr_t lpn);
119 static struct pv_entry *pv_alloc(void);
120 static void pv_free(struct pv_entry *pv);
121 static void pmap_deferred_init(void);
122
123 extern void setup_signal_handlers(void);
124
125 /* exposed (to the signal handler, for example) */
126 vaddr_t kmem_k_start, kmem_k_end;
127 vaddr_t kmem_kvm_start, kmem_kvm_end;
128 vaddr_t kmem_user_start, kmem_user_end;
129 vaddr_t kmem_kvm_cur_start, kmem_kvm_cur_end;
130
131 /* amount of physical memory */
132 int num_pv_entries = 0;
133 int num_pmaps = 0;
134
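/*
 * With SPARSE_MEMFILE the backing store is created as a sparse file by
 * writing a single byte at its end; otherwise pmap_bootstrap() writes it
 * out page by page.
 */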
135 #define SPARSE_MEMFILE
136
137
138 void
139 pmap_bootstrap(void)
140 {
141 struct pmap *pmap;
142 paddr_t DRAM_cfg;
143 paddr_t fpos, file_len;
144 paddr_t kernel_fpos, pv_fpos, tlb_fpos, pm_l1_fpos, pm_fpos;
145 paddr_t wlen;
146 paddr_t barrier_len;
147 paddr_t pv_table_size;
148 vaddr_t free_start, free_end;
149 paddr_t pa;
150 vaddr_t va;
151 size_t kmem_k_length, written;
152 uintptr_t pg, l1;
153 void *addr;
154 int err;
155
156 extern void _start(void); /* start of kernel */
157 extern int etext; /* end of the kernel */
158 extern int edata; /* end of the init. data segment */
159 extern int end; /* end of bss */
160 vaddr_t vm_min_addr;
161
162 vm_min_addr = thunk_get_vm_min_address();
163 vm_min_addr = vm_min_addr < PAGE_SIZE ? PAGE_SIZE : vm_min_addr;
164
165 thunk_printf_debug("Information retrieved from system and elf image\n");
166 thunk_printf_debug("min VM address at %p\n", (void *) vm_min_addr);
167 thunk_printf_debug("start kernel at %p\n", _start);
168 thunk_printf_debug(" end kernel at %p\n", &etext);
169 thunk_printf_debug(" end of init. data at %p\n", &edata);
170 thunk_printf_debug("1st end of data at %p\n", &end);
171 thunk_printf_debug("CUR end data at %p\n", thunk_sbrk(0));
172
173 barrier_len = 2 * 1024 * 1024;
174
175 /* calculate kernel section (R-X) */
176 kmem_k_start = (vaddr_t) PAGE_SIZE * (atop(_start) );
177 kmem_k_end = (vaddr_t) PAGE_SIZE * (atop(&etext) + 1);
178 kmem_k_length = kmem_k_end - kmem_k_start;
179
180 /* calculate total available memory space & available pages */
181 DRAM_cfg = (vaddr_t) TEXTADDR;
182 physmem = DRAM_cfg / PAGE_SIZE;
183
184 /* kvm at the top */
185 kmem_kvm_end = kmem_k_start - barrier_len;
186 kmem_kvm_start = kmem_kvm_end - KVMSIZE;
187
188 /* allow some pmap scratch space */
189 pm_tmp_p0 = (void *) (kmem_kvm_start);
190 pm_tmp_p1 = (void *) (kmem_kvm_start + PAGE_SIZE);
191 kmem_kvm_start += 2*PAGE_SIZE;
192
193 /* claim an area for userland (---/R--/RW-/RWX) */
194 kmem_user_start = vm_min_addr;
195 kmem_user_end = kmem_kvm_start - barrier_len;
196
197 /* print summary */
198 aprint_verbose("\nMemory summary\n");
199 aprint_verbose("\tkmem_user_start\t%p\n", (void *) kmem_user_start);
200 aprint_verbose("\tkmem_user_end\t%p\n", (void *) kmem_user_end);
201 aprint_verbose("\tkmem_k_start\t%p\n", (void *) kmem_k_start);
202 aprint_verbose("\tkmem_k_end\t%p\n", (void *) kmem_k_end);
203 aprint_verbose("\tkmem_kvm_start\t%p\n", (void *) kmem_kvm_start);
204 aprint_verbose("\tkmem_kvm_end\t%p\n", (void *) kmem_kvm_end);
205
206 aprint_verbose("\tDRAM_cfg\t%10d\n", (int) DRAM_cfg);
207 aprint_verbose("\tkvmsize\t\t%10d\n", (int) KVMSIZE);
208 aprint_verbose("\tuser_len\t%10d\n",
209 (int) (kmem_user_end - kmem_user_start));
210
211 aprint_verbose("\n\n");
212
213 /* make critical assertions before modifying anything */
214 if (sizeof(struct pcb) > USPACE) {
215 panic("sizeof(struct pcb) is %d bytes too big for USPACE. "
216 "Please adjust TRAPSTACKSIZE calculation",
217 (int) (sizeof(struct pcb) - USPACE));
218 }
219 if (TRAPSTACKSIZE < 4*PAGE_SIZE) {
220 panic("TRAPSTACKSIZE is too small, please increase UPAGES");
221 }
222 if (sizeof(struct pmap_l2) > PAGE_SIZE) {
223 panic("struct pmap_l2 bigger than one page?\n");
224 }
225
226 /* protect user memory UVM area (---) */
227 err = thunk_munmap((void *) kmem_user_start,
228 kmem_k_start - kmem_user_start);
229 if (err)
230 panic("pmap_bootstrap: userland uvm space protection "
231 "failed (%d)\n", thunk_geterrno());
232
233 #if 0
234 /* protect kvm UVM area if separate (---) */
235 err = thunk_munmap((void *) kmem_kvm_start,
236 kmem_kvm_end - kmem_kvm_start);
237 if (err)
238 panic("pmap_bootstrap: kvm uvm space protection "
239 "failed (%d)\n", thunk_geterrno());
240 #endif
241
242 thunk_printf_debug("Creating memory mapped backend\n");
243
244 /* create memory file since mmap/maccess can only be done on files */
245 strlcpy(mem_name, "/tmp/netbsd.XXXXXX", sizeof(mem_name));
246 mem_fh = thunk_mkstemp(mem_name);
247 if (mem_fh < 0)
248 panic("pmap_bootstrap: can't create memory file\n");
249
250 /* unlink the file so space is freed when we quit */
251 if (thunk_unlink(mem_name) == -1)
252 panic("pmap_bootstrap: can't unlink %s", mem_name);
253
254 /* file_len is the backing store length, nothing to do with placement */
255 file_len = DRAM_cfg;
256
257 #ifdef SPARSE_MEMFILE
258 {
259 char dummy;
260
261 wlen = thunk_pwrite(mem_fh, &dummy, 1, file_len - 1);
262 if (wlen != 1)
263 panic("pmap_bootstrap: can't grow file\n");
264 }
265 #else
266 {
267 char block[PAGE_SIZE];
268
269 printf("Creating memory file\r");
270 for (pg = 0; pg < file_len; pg += PAGE_SIZE) {
271 wlen = thunk_pwrite(mem_fh, block, PAGE_SIZE, pg);
272 if (wlen != PAGE_SIZE)
273 panic("pmap_bootstrap: write fails, disc full?");
274 }
275 }
276 #endif
277
278 /* protect the current kernel section */
279 err = thunk_mprotect((void *) kmem_k_start, kmem_k_length,
280 THUNK_PROT_READ | THUNK_PROT_EXEC);
281 assert(err == 0);
282
283 /* madvise the host kernel about our intentions with the memory */
284 /* no measured effect, but might make a difference on high load */
285 err = thunk_madvise((void *) kmem_user_start,
286 kmem_k_start - kmem_user_start,
287 THUNK_MADV_WILLNEED | THUNK_MADV_RANDOM);
288 assert(err == 0);
289
290 /* map the kernel at the start of the 'memory' file */
291 kernel_fpos = 0;
292 written = thunk_pwrite(mem_fh, (void *) kmem_k_start, kmem_k_length,
293 kernel_fpos);
294 assert(written == kmem_k_length);
295 fpos = kernel_fpos + kmem_k_length;
296
297 /* initialize counters */
298 free_start = fpos; /* in physical space ! */
299 free_end = file_len; /* in physical space ! */
300 kmem_kvm_cur_start = kmem_kvm_start;
301
302 /* calculate pv table size */
303 phys_npages = file_len / PAGE_SIZE;
304 pv_table_size = round_page(phys_npages * sizeof(struct pv_entry));
305 thunk_printf_debug("claiming %"PRIu64" KB of pv_table for "
306 "%"PRIdPTR" pages of physical memory\n",
307 (uint64_t) pv_table_size/1024, (uintptr_t) phys_npages);
308
309 /* calculate number of pmap entries needed for a complete map */
310 pm_nentries = (kmem_k_end - VM_MIN_ADDRESS) / PAGE_SIZE;
311 pm_entries_size = round_page(pm_nentries * sizeof(struct pv_entry *));
312 thunk_printf_debug("tlb va->pa lookup table is %"PRIu64" KB for "
313 "%d logical pages\n", pm_entries_size/1024, pm_nentries);
314
315 /* calculate how big the l1 tables are going to be */
316 pm_nl1 = pm_nentries / PMAP_L2_NENTRY;
317 pm_l1_size = round_page(pm_nl1 * sizeof(struct pmap_l2 *));
318
319 /* claim pv table */
320 pv_fpos = fpos;
321 pv_table = (struct pv_entry *) kmem_kvm_cur_start;
322 addr = thunk_mmap(pv_table, pv_table_size,
323 THUNK_PROT_READ | THUNK_PROT_WRITE,
324 THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
325 mem_fh, pv_fpos);
326 if (addr != (void *) pv_table)
327 panic("pmap_bootstrap: can't map in pv table\n");
328
329 memset(pv_table, 0, pv_table_size); /* test and clear */
330
331 thunk_printf_debug("pv_table initialised correctly, mmap works\n");
332
333 /* advance */
334 kmem_kvm_cur_start += pv_table_size;
335 fpos += pv_table_size;
336
337 /* set up tlb space */
338 tlb = (struct pv_entry **) kmem_kvm_cur_start;
339 tlb_fpos = fpos;
340 addr = thunk_mmap(tlb, pm_entries_size,
341 THUNK_PROT_READ | THUNK_PROT_WRITE,
342 THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
343 mem_fh, tlb_fpos);
344 if (addr != (void *) tlb)
345 panic("pmap_bootstrap: can't map in tlb entries\n");
346
347 memset(tlb, 0, pm_entries_size); /* test and clear */
348
349 thunk_printf_debug("kernel tlb entries initialized correctly\n");
350
351 /* advance */
352 kmem_kvm_cur_start += pm_entries_size;
353 fpos += pm_entries_size;
354
355 /* set up kernel pmap and add a l1 map */
356 pmap = pmap_kernel();
357 memset(pmap, 0, sizeof(*pmap));
358 pmap->pm_count = 1; /* reference */
359 pmap->pm_flags = PM_ACTIVE; /* kernel pmap is always active */
360 pmap->pm_l1 = (struct pmap_l2 **) kmem_kvm_cur_start;
361
362 pm_l1_fpos = fpos;
363 addr = thunk_mmap(pmap->pm_l1, pm_l1_size,
364 THUNK_PROT_READ | THUNK_PROT_WRITE,
365 THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
366 mem_fh, pm_l1_fpos);
367 if (addr != (void *) pmap->pm_l1)
368 panic("pmap_bootstrap: can't map in pmap l1 entries\n");
369
370 memset(pmap->pm_l1, 0, pm_l1_size); /* test and clear */
371
372 thunk_printf_debug("kernel pmap l1 table initialised correctly\n");
373
374 /* advance for l1 tables */
375 kmem_kvm_cur_start += round_page(pm_l1_size);
376 fpos += round_page(pm_l1_size);
377
378 /* followed by the pm entries */
379 pm_fpos = fpos;
380 kernel_pm_entries = (struct pv_entry **) kmem_kvm_cur_start;
381 addr = thunk_mmap(kernel_pm_entries, pm_entries_size,
382 THUNK_PROT_READ | THUNK_PROT_WRITE,
383 THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
384 mem_fh, pm_fpos);
385 if (addr != (void *) kernel_pm_entries)
386 panic("pmap_bootstrap: can't map in kernel pmap entries\n");
387
388 memset(kernel_pm_entries, 0, pm_entries_size); /* test and clear */
389
390 /* advance for the statically allocated pm_entries */
391 kmem_kvm_cur_start += pm_entries_size;
392 fpos += pm_entries_size;
393
394 /* put pointers in the l1 to point to the pv_entry space */
395 for (l1 = 0; l1 < pm_nl1; l1++) {
396 pmap = pmap_kernel();
397 pmap->pm_l1[l1] = (struct pmap_l2 *)
398 ((vaddr_t) kernel_pm_entries + l1 * PMAP_L2_SIZE);
399 }
400
401 /* kmem used [kmem_kvm_start - kmem_kvm_cur_start] */
402 kmem_kvm_cur_end = kmem_kvm_cur_start;
403
404 /* manually enter the mappings into the kernel map */
405 for (pg = 0; pg < pv_table_size; pg += PAGE_SIZE) {
406 pa = pv_fpos + pg;
407 va = (vaddr_t) pv_table + pg;
408 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
409 }
410 thunk_printf_debug("pv_table mem added to the kernel pmap\n");
411 for (pg = 0; pg < pm_entries_size; pg += PAGE_SIZE) {
412 pa = tlb_fpos + pg;
413 va = (vaddr_t) tlb + pg;
414 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
415 }
416 thunk_printf_debug("kernel tlb entries mem added to the kernel pmap\n");
417 for (pg = 0; pg < pm_l1_size; pg += PAGE_SIZE) {
418 pa = pm_l1_fpos + pg;
419 va = (vaddr_t) pmap->pm_l1 + pg;
420 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
421 }
422 thunk_printf_debug("kernel pmap l1 mem added to the kernel pmap\n");
423 for (pg = 0; pg < pm_entries_size; pg += PAGE_SIZE) {
424 pa = pm_fpos + pg;
425 va = (vaddr_t) kernel_pm_entries + pg;
426 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
427 }
428 thunk_printf_debug("kernel pmap entries mem added to the kernel pmap\n");
429 #if 0
430 /* not yet, or not needed */
431 for (pg = 0; pg < kmem_k_length; pg += PAGE_SIZE) {
432 pa = kernel_fpos + pg;
433 va = (vaddr_t) kmem_k_start + pg;
434 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, 0);
435 }
436 thunk_printf_debug("kernel mem added to the kernel pmap\n");
437 #endif
438
439 /* add file space to uvm's FREELIST */
440 uvm_page_physload(atop(0),
441 atop(free_end),
442 atop(free_start + fpos), /* mark used till fpos */
443 atop(free_end),
444 VM_FREELIST_DEFAULT);
445
446 /* setup syscall emulation */
447 if (thunk_syscallemu_init((void *)VM_MIN_ADDRESS,
448 (void *)VM_MAXUSER_ADDRESS) != 0)
449 panic("couldn't enable syscall emulation");
450
451 aprint_verbose("leaving pmap_bootstrap:\n");
452 aprint_verbose("\t%"PRIu64" MB of physical pages left\n",
453 (uint64_t) (free_end - (free_start + fpos))/1024/1024);
454 aprint_verbose("\t%"PRIu64" MB of kmem left\n",
455 (uint64_t) (kmem_kvm_end - kmem_kvm_cur_end)/1024/1024);
456
457 setup_signal_handlers();
458 }
459
460 void
461 pmap_init(void)
462 {
463 }
464
465 /* return kernel space start and end (including growth) */
466 void
467 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
468 {
469 if (vstartp)
470 *vstartp = kmem_kvm_cur_start; /* min to map in */
471 if (vendp)
472 *vendp = kmem_kvm_end - PAGE_SIZE; /* max available */
473 }
474
475 static void
476 pmap_deferred_init(void)
477 {
478 /* XXX we COULD realloc our pv_table etc with malloc() but for what? */
479
480 /* create pmap pool */
481 pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0,
482 "pmappool", NULL, IPL_NONE);
483 pool_init(&pmap_pventry_pool, sizeof(struct pv_entry), 0, 0, 0,
484 "pventry", NULL, IPL_HIGH);
485 }
486
487 pmap_t
488 pmap_create(void)
489 {
490 static int pmap_initialised = 0;
491 struct pmap *pmap;
492
493 if (!pmap_initialised) {
494 pmap_deferred_init();
495 pmap_initialised = 1;
496 }
497
498 thunk_printf_debug("pmap_create\n");
499 num_pmaps++;
500 #if 0
501 printf("%s: pre alloc: num_pmaps %"PRIu64" (%"PRIu64" kb), "
502 "num_pv_entries %"PRIu64" (%"PRIu64" kb)\n",
503 __func__,
504 (uint64_t) num_pmaps,
505 (uint64_t) num_pmaps * (sizeof(*pmap) + pm_l1_size) / 1024,
506 (uint64_t) num_pv_entries,
507 (uint64_t) num_pv_entries * (sizeof(struct pv_entry)) / 1024);
508 #endif
509
510 pmap = pool_get(&pmap_pool, PR_WAITOK);
511 memset(pmap, 0, sizeof(*pmap));
512 pmap->pm_count = 1;
513 pmap->pm_flags = 0;
514
515 /* claim l1 table */
516 pmap->pm_l1 = kmem_zalloc(pm_l1_size, KM_SLEEP);
517 assert(pmap->pm_l1);
518
519 thunk_printf_debug("\tpmap %p\n", pmap);
520
521 return pmap;
522 }
523
524 void
525 pmap_destroy(pmap_t pmap)
526 {
527 struct pmap_l2 *l2tbl;
528 int l1;
529
530 /* if multiple references exist just remove a reference */
531 thunk_printf_debug("pmap_destroy %p\n", pmap);
532 if (--pmap->pm_count > 0)
533 return;
534 num_pmaps--;
535
536 /* safeguard against silly errors */
537 KASSERT((pmap->pm_flags & PM_ACTIVE) == 0);
538 KASSERT(pmap->pm_stats.resident_count == 0);
539 KASSERT(pmap->pm_stats.wired_count == 0);
540 #ifdef DIAGNOSTIC
541 for (l1 = 0; l1 < pm_nl1; l1++) {
542 int l2;
543
544 l2tbl = pmap->pm_l1[l1];
545 if (!l2tbl)
546 continue;
547 for (l2 = 0; l2 < PMAP_L2_NENTRY; l2++) {
548 if (l2tbl->pm_l2[l2])
549 panic("pmap_destroy: pmap isn't empty");
550 }
551 }
552 #endif
553 for (l1 = 0; l1 < pm_nl1; l1++) {
554 l2tbl = pmap->pm_l1[l1];
555 if (!l2tbl)
556 continue;
557 kmem_free(l2tbl, PMAP_L2_SIZE);
558 }
559 kmem_free(pmap->pm_l1, pm_l1_size);
560 pool_put(&pmap_pool, pmap);
561 }
562
563 void
564 pmap_reference(pmap_t pmap)
565 {
566 thunk_printf_debug("pmap_reference %p\n", (void *) pmap);
567 pmap->pm_count++;
568 }
569
570 long
571 pmap_resident_count(pmap_t pmap)
572 {
573 return pmap->pm_stats.resident_count;
574 }
575
576 long
577 pmap_wired_count(pmap_t pmap)
578 {
579 return pmap->pm_stats.wired_count;
580 }
581
582 static struct pv_entry *
583 pv_alloc(void)
584 {
585 struct pv_entry *pv;
586
587 num_pv_entries++;
588 pv = pool_get(&pmap_pventry_pool, PR_WAITOK);
589 memset(pv, 0, sizeof(struct pv_entry));
590
591 return pv;
592 }
593
594 static void
595 pv_free(struct pv_entry *pv)
596 {
597 num_pv_entries--;
598 pool_put(&pmap_pventry_pool, pv);
599 }
600
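/*
 * Find or create the pv_entry for the mapping of physical page ppn at
 * logical page lpn in the given pmap.  The head entry in pv_table is used
 * when it is still free, an existing identical mapping is reused, and
 * otherwise a fresh entry is allocated and chained in after the head.
 */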
601 static struct pv_entry *
602 pv_get(pmap_t pmap, uintptr_t ppn, uintptr_t lpn)
603 {
604 struct pv_entry *pv;
605
606 /* If the head entry is free, use that. */
607 pv = &pv_table[ppn];
608 if (pv->pv_pmap == NULL) {
609 pmap->pm_stats.resident_count++;
610 return pv;
611 }
612 /* If this mapping exists already, use that. */
613 for (pv = pv; pv != NULL; pv = pv->pv_next) {
614 if ((pv->pv_pmap == pmap) && (pv->pv_lpn == lpn)) {
615 return pv;
616 }
617 }
618 /* Otherwise, allocate a new entry and link it in after the head. */
619 thunk_printf_debug("pv_get: multiple mapped page ppn %"PRIdPTR", "
620 "lpn %"PRIdPTR"\n", ppn, lpn);
621
622 /* extra sanity */
623 assert(ppn < phys_npages);
624
625 pv = pv_alloc();
626 if (pv == NULL)
627 return NULL;
628
629 pv->pv_next = pv_table[ppn].pv_next;
630 pv_table[ppn].pv_next = pv;
631 pmap->pm_stats.resident_count++;
632
633 return pv;
634 }
635
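/*
 * Record (or clear, when pv is NULL) the pv_entry for logical page lpn in
 * the pmap's two-level table, allocating the l2 page on first use.
 */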
636 static void
637 pmap_set_pv(pmap_t pmap, uintptr_t lpn, struct pv_entry *pv)
638 {
639 struct pmap_l2 *l2tbl;
640 int l1 = lpn / PMAP_L2_NENTRY;
641 int l2 = lpn % PMAP_L2_NENTRY;
642
643 #ifdef DIAGNOSTIC
644 if (lpn >= pm_nentries)
645 panic("peeing outside box : addr in page around %"PRIx64"\n",
646 (uint64_t) lpn*PAGE_SIZE);
647 #endif
648
649 l2tbl = pmap->pm_l1[l1];
650 if (!l2tbl) {
651 l2tbl = pmap->pm_l1[l1] = kmem_zalloc(PMAP_L2_SIZE, KM_SLEEP);
652 /* should be zero filled */
653 }
654 l2tbl->pm_l2[l2] = pv;
655 }
656
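/* return the pv_entry recorded at logical page lpn, or NULL if none */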
657 static struct pv_entry *
658 pmap_lookup_pv(pmap_t pmap, uintptr_t lpn)
659 {
660 struct pmap_l2 *l2tbl;
661 int l1 = lpn / PMAP_L2_NENTRY;
662 int l2 = lpn % PMAP_L2_NENTRY;
663
664 if (lpn >= pm_nentries)
665 return NULL;
666
667 l2tbl = pmap->pm_l1[l1];
668 if (l2tbl)
669 return l2tbl->pm_l2[l2];
670 return NULL;
671 }
672
673 /*
674 * Check if the given page fault was our reference / modified emulation fault;
675 * if so, return true; otherwise return false and let uvm handle it.
676 */
677 bool
678 pmap_fault(pmap_t pmap, vaddr_t va, vm_prot_t *atype)
679 {
680 struct pv_entry *pv, *ppv;
681 uintptr_t lpn, ppn;
682 int prot, cur_prot, diff;
683
684 thunk_printf_debug("pmap_fault pmap %p, va %p\n", pmap, (void *) va);
685
686 /* get logical page from vaddr */
687 lpn = atop(va - VM_MIN_ADDRESS); /* V->L */
688 pv = pmap_lookup_pv(pmap, lpn);
689
690 /* not known! then it must be UVM's work */
691 if (pv == NULL) {
692 //thunk_printf("%s: no mapping yet for %p\n",
693 // __func__, (void *) va);
694 *atype = VM_PROT_READ; /* assume it was a read */
695 return false;
696 }
697
698 /* determine physical address and lookup 'root' pv_entry */
699 ppn = pv->pv_ppn;
700 ppv = &pv_table[ppn];
701
702 /* if unmanaged we just make sure it is there! */
703 if (ppv->pv_vflags & PV_UNMANAGED) {
704 printf("%s: oops warning unmanaged page %"PRIiPTR" faulted\n",
705 __func__, ppn);
706 /* atype not set */
707 pmap_page_activate(pv);
708 return true;
709 }
710
711 /* check the TLB, if NULL we have a TLB fault */
712 if (tlb[pv->pv_lpn] == NULL) {
713 if (pv->pv_mmap_ppl != THUNK_PROT_NONE) {
714 thunk_printf_debug("%s: tlb fault page lpn %"PRIiPTR"\n",
715 __func__, pv->pv_lpn);
716 pmap_page_activate(pv);
717 return true;
718 }
719 }
720
721 /* determine pmap access type (mmap doesn't need to be 1:1 on VM_PROT_) */
722 prot = pv->pv_prot;
723 cur_prot = VM_PROT_NONE;
724 if (pv->pv_mmap_ppl & THUNK_PROT_READ)
725 cur_prot |= VM_PROT_READ;
726 if (pv->pv_mmap_ppl & THUNK_PROT_WRITE)
727 cur_prot |= VM_PROT_WRITE;
728 if (pv->pv_mmap_ppl & THUNK_PROT_EXEC)
729 cur_prot |= VM_PROT_EXECUTE;
730
731 diff = prot & (prot ^ cur_prot);
732
733 thunk_printf_debug("%s: prot = %d, cur_prot = %d, diff = %d\n",
734 __func__, prot, cur_prot, diff);
735 *atype = VM_PROT_READ; /* assume it's a read error */
736 if (diff & VM_PROT_READ) {
737 if ((ppv->pv_pflags & PV_REFERENCED) == 0) {
738 ppv->pv_pflags |= PV_REFERENCED;
739 pmap_update_page(ppn);
740 return true;
741 }
742 panic("pmap: page not readable but marked referenced?");
743 return false;
744 }
745
746 #if 0
747 /* this might be questionable */
748 if (diff & VM_PROT_EXECUTE) {
749 *atype = VM_PROT_EXECUTE; /* assume it was executing */
750 if (prot & VM_PROT_EXECUTE) {
751 if ((ppv->pv_pflags & PV_REFERENCED) == 0) {
752 ppv->pv_pflags |= PV_REFERENCED;
753 pmap_update_page(ppn);
754 return true;
755 }
756 }
757 return false;
758 }
759 #endif
760
761 *atype = VM_PROT_WRITE; /* assume it's a write error */
762 if (diff & VM_PROT_WRITE) {
763 if (prot & VM_PROT_WRITE) {
764 /* should be allowed to write */
765 if ((ppv->pv_pflags & PV_MODIFIED) == 0) {
766 /* was marked unmodified */
767 ppv->pv_pflags |= PV_MODIFIED;
768 pmap_update_page(ppn);
769 return true;
770 }
771 }
772 panic("pmap: page not writable but marked modified?");
773 return false;
774 }
775
776 /* not due to our r/m handling, let uvm handle it ! */
777 return false;
778 }
779
780
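/*
 * Activate a mapping by mmap()ing the backing memory file at the logical
 * va with the currently allowed protection (pv_mmap_ppl) and recording it
 * in the tlb[] shadow table.  Pages mapped THUNK_PROT_NONE are not
 * recorded, so the next access shows up as a tlb fault in pmap_fault().
 */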
781 static void
782 pmap_page_activate(struct pv_entry *pv)
783 {
784 paddr_t pa = pv->pv_ppn * PAGE_SIZE;
785 vaddr_t va = pv->pv_lpn * PAGE_SIZE + VM_MIN_ADDRESS; /* L->V */
786 uint32_t map_flags;
787 void *addr;
788
789 map_flags = THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED;
790
791 addr = thunk_mmap((void *) va, PAGE_SIZE, pv->pv_mmap_ppl,
792 map_flags, mem_fh, pa);
793 thunk_printf_debug("page_activate: (va %p, pa %p, prot %d, ppl %d) -> %p\n",
794 (void *) va, (void *) pa, pv->pv_prot, pv->pv_mmap_ppl,
795 (void *) addr);
796 if (addr != (void *) va)
797 panic("pmap_page_activate: mmap failed (expected %p got %p): %d",
798 (void *)va, addr, thunk_geterrno());
799
800 tlb[pv->pv_lpn] = NULL;
801 if (pv->pv_mmap_ppl != THUNK_PROT_NONE)
802 tlb[pv->pv_lpn] = pv;
803 }
804
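/*
 * Deactivate a mapping by remapping the page THUNK_PROT_NONE and clearing
 * its tlb[] entry, so any further access faults.
 */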
805 static void
806 pmap_page_deactivate(struct pv_entry *pv)
807 {
808 paddr_t pa = pv->pv_ppn * PAGE_SIZE;
809 vaddr_t va = pv->pv_lpn * PAGE_SIZE + VM_MIN_ADDRESS; /* L->V */
810 uint32_t map_flags;
811 void *addr;
812
813 /* don't try to unmap pv entries that are already unmapped */
814 if (!tlb[pv->pv_lpn])
815 return;
816
817 if (tlb[pv->pv_lpn]->pv_mmap_ppl == THUNK_PROT_NONE)
818 goto deactivate;
819
820 map_flags = THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED;
821 addr = thunk_mmap((void *) va, PAGE_SIZE, THUNK_PROT_NONE,
822 map_flags, mem_fh, pa);
823 thunk_printf_debug("page_deactivate: (va %p, pa %p, ppl %d) -> %p\n",
824 (void *) va, (void *) pa, pv->pv_mmap_ppl, (void *) addr);
825 if (addr != (void *) va)
826 panic("pmap_page_deactivate: mmap failed");
827
828 deactivate:
829 tlb[pv->pv_lpn] = NULL;
830 }
831
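/*
 * Recompute the protection to program into the host mmap() for this
 * mapping.  Referenced/modified emulation works by mapping the page with
 * less permission than was requested until the resulting fault marks the
 * page referenced and/or modified; wired and unmanaged mappings skip the
 * emulation and always get read/write.
 */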
832 static void
833 pv_update(struct pv_entry *pv)
834 {
835 int pflags, vflags;
836 int mmap_ppl;
837
838 /* get our per-physical-page flags */
839 pflags = pv_table[pv->pv_ppn].pv_pflags;
840 vflags = pv_table[pv->pv_ppn].pv_vflags;
841
842 KASSERT(THUNK_PROT_READ == VM_PROT_READ);
843 KASSERT(THUNK_PROT_WRITE == VM_PROT_WRITE);
844 KASSERT(THUNK_PROT_EXEC == VM_PROT_EXECUTE);
845
846 /* create referenced/modified emulation */
847 if ((pv->pv_prot & VM_PROT_WRITE) &&
848 (pflags & PV_REFERENCED) && (pflags & PV_MODIFIED)) {
849 mmap_ppl = THUNK_PROT_READ | THUNK_PROT_WRITE;
850 } else if ((pv->pv_prot & (VM_PROT_READ | VM_PROT_EXECUTE)) &&
851 (pflags & PV_REFERENCED)) {
852 mmap_ppl = THUNK_PROT_READ;
853 if (pv->pv_prot & VM_PROT_EXECUTE)
854 mmap_ppl |= THUNK_PROT_EXEC;
855 } else {
856 mmap_ppl = THUNK_PROT_NONE;
857 }
858
859 /* unmanaged or wired pages are special; they don't track r/m */
860 if (vflags & (PV_UNMANAGED | PV_WIRED))
861 mmap_ppl = THUNK_PROT_READ | THUNK_PROT_WRITE;
862
863 pv->pv_mmap_ppl = mmap_ppl;
864 }
865
866 /* update mapping of a physical page */
867 static void
868 pmap_update_page(uintptr_t ppn)
869 {
870 struct pv_entry *pv;
871
872 for (pv = &pv_table[ppn]; pv != NULL; pv = pv->pv_next) {
873 thunk_printf_debug("pmap_update_page: ppn %"PRIdPTR", pv->pv_map = %p\n",
874 ppn, pv->pv_pmap);
875 if (pv->pv_pmap != NULL) {
876 pv_update(pv);
877 if (pv->pv_pmap->pm_flags & PM_ACTIVE)
878 pmap_page_activate(pv);
879 else
880 pmap_page_deactivate(pv);
882 }
883 }
884 }
885
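/*
 * Common back end for pmap_enter() and pmap_kenter_pa(): translate the
 * addresses to page numbers, replace any conflicting mapping at this
 * logical page, fill in the pv_entry and its wired/unmanaged state,
 * pre-seed the referenced/modified bits from the access type in flags for
 * managed pages, and (re)map the page when the pmap is currently active.
 */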
886 static int
887 pmap_do_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, uint flags, int unmanaged)
888 {
889 struct pv_entry *pv, *ppv;
890 uintptr_t ppn, lpn;
891 int s;
892
893 /* to page numbers */
894 ppn = atop(pa);
895 lpn = atop(va - VM_MIN_ADDRESS); /* V->L */
896 #ifdef DIAGNOSTIC
897 if ((va < VM_MIN_ADDRESS) || (va > VM_MAX_KERNEL_ADDRESS))
898 panic("pmap_do_enter: invalid va issued\n");
899 #endif
900
901 /* raise interrupt level */
902 s = splvm();
903
904 /* remove existing mapping at this lpn */
905 pv = pmap_lookup_pv(pmap, lpn);
906 if (pv && pv->pv_ppn != ppn)
907 pmap_remove(pmap, va, va + PAGE_SIZE);
908
909 /* get our entry */
910 ppv = &pv_table[ppn];
911 pv = pv_get(pmap, ppn, lpn); /* get our (copy) of pv entry */
912
913 /* and adjust stats */
914 if (pv == NULL)
915 panic("pmap_do_enter: didn't find pv entry!");
916 if (pv->pv_vflags & PV_WIRED)
917 pmap->pm_stats.wired_count--;
918
919 /* enter our details */
920 pv->pv_pmap = pmap;
921 pv->pv_ppn = ppn;
922 pv->pv_lpn = lpn;
923 pv->pv_prot = prot;
924 pv->pv_vflags = 0;
925 /* pv->pv_next = NULL; */ /* might confuse linked list? */
926 if (flags & PMAP_WIRED)
927 pv->pv_vflags |= PV_WIRED;
928
929 if (unmanaged) {
930 /* don't track r/m */
931 pv->pv_vflags |= PV_UNMANAGED;
932 } else {
933 if (flags & VM_PROT_WRITE)
934 ppv->pv_pflags |= PV_REFERENCED | PV_MODIFIED;
935 else if (flags & (VM_PROT_ALL))
936 ppv->pv_pflags |= PV_REFERENCED;
937 }
938
939 /* map it in */
940 pmap_update_page(ppn);
941 pmap_set_pv(pmap, lpn, pv);
942
943 /* adjust stats */
944 if (pv->pv_vflags & PV_WIRED)
945 pmap->pm_stats.wired_count++;
946
947 splx(s);
948
949 /* activate page directly when on active pmap */
950 if (pmap->pm_flags & PM_ACTIVE)
951 pmap_page_activate(pv);
952
953 return 0;
954 }
955
956 int
957 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
958 {
959 thunk_printf_debug("pmap_enter %p : v %p, p %p, prot %d, flags %d\n",
960 (void *) pmap, (void *) va, (void *) pa, (int) prot, (int) flags);
961 return pmap_do_enter(pmap, va, pa, prot, flags, 0);
962 }
963
964 /* release the pv_entry for a mapping. Code also derived from the hp300 pmap */
965 static void
966 pv_release(pmap_t pmap, uintptr_t ppn, uintptr_t lpn)
967 {
968 struct pv_entry *pv, *npv;
969
970 thunk_printf_debug("pv_release ppn %"PRIdPTR", lpn %"PRIdPTR"\n", ppn, lpn);
971 pv = &pv_table[ppn];
972 /*
973 * If it is the first entry on the list, it is actually
974 * in the header and we must copy the following entry up
975 * to the header. Otherwise we must search the list for
976 * the entry. In either case we free the now unused entry.
977 */
978 if ((pmap == pv->pv_pmap) && (lpn == pv->pv_lpn)) {
979 npv = pv->pv_next;
980 if (npv) {
981 /* pull up first entry from chain. */
982 memcpy(pv, npv, offsetof(struct pv_entry, pv_pflags));
983 pmap_set_pv(pv->pv_pmap, pv->pv_lpn, pv);
984 pv_free(npv);
985 } else {
986 memset(pv, 0, offsetof(struct pv_entry, pv_pflags));
987 }
988 } else {
989 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
990 if ((pmap == npv->pv_pmap) && (lpn == npv->pv_lpn))
991 break;
992 pv = npv;
993 }
994 KASSERT(npv != NULL);
995 pv->pv_next = npv->pv_next;
996 pv_free(npv);
997 }
998 pmap_set_pv(pmap, lpn, NULL);
999 pmap->pm_stats.resident_count--;
1000 }
1001
1002 void
1003 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1004 {
1005 uintptr_t slpn, elpn, lpn;
1006 struct pv_entry *pv;
1007 int s;
1008
1009 slpn = atop(sva - VM_MIN_ADDRESS); /* V->L */
1010 elpn = atop(eva - VM_MIN_ADDRESS); /* V->L */
1011
1012 thunk_printf_debug("pmap_remove() called from "
1013 "lpn %"PRIdPTR" to lpn %"PRIdPTR"\n", slpn, elpn);
1014
1015 s = splvm();
1016 for (lpn = slpn; lpn < elpn; lpn++) {
1017 pv = pmap_lookup_pv(pmap, lpn);
1018 if (pv != NULL) {
1019 if (pmap->pm_flags & PM_ACTIVE) {
1020 pmap_page_deactivate(pv);
1021 // MEMC_WRITE(pv->pv_deactivate);
1022 // cpu_cache_flush();
1023 }
1024 pmap_set_pv(pmap, lpn, NULL);
1025 if (pv->pv_vflags & PV_WIRED)
1026 pmap->pm_stats.wired_count--;
1027 pv_release(pmap, pv->pv_ppn, lpn);
1028 }
1029 }
1030 splx(s);
1031 }
1032
1033 bool
1034 pmap_remove_all(pmap_t pmap)
1035 {
1036 /* just a hint that all the entries are to be removed */
1037 thunk_printf_debug("pmap_remove_all() dummy called\n");
1038
1039 /* we don't do anything with the kernel pmap */
1040 if (pmap == pmap_kernel())
1041 return false;
1042
1043 #if 0
1044 /* remove all mappings in one-go; not needed */
1045 pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
1046 thunk_munmap((void *) VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
1047 #endif
1048 #if 0
1049 /* remove all cached info from the pages */
1050 thunk_msync(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS,
1051 THUNK_MS_SYNC | THUNK_MS_INVALIDATE);
1052 #endif
1053 return false;
1054 }
1055
1056 void
1057 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1058 {
1059 struct pv_entry *pv;
1060 intptr_t slpn, elpn, lpn;
1061 int s;
1062
1063 if (prot == VM_PROT_NONE) {
1064 pmap_remove(pmap, sva, eva);
1065 return;
1066 }
1067 if (prot & VM_PROT_WRITE)
1068 return; /* apparently we're meant to */
1069 if (pmap == pmap_kernel())
1070 return; /* can't restrict kernel w/o unmapping. */
1071
1072 slpn = atop(sva - VM_MIN_ADDRESS); /* V->L */
1073 elpn = atop(eva - VM_MIN_ADDRESS); /* V->L */
1074
1075 thunk_printf_debug("pmap_protect() called from "
1076 "lpn %"PRIdPTR" to lpn %"PRIdPTR"\n", slpn, elpn);
1077
1078 s = splvm();
1079 for (lpn = slpn; lpn < elpn; lpn++) {
1080 pv = pmap_lookup_pv(pmap, lpn);
1081 if (pv != NULL) {
1082 pv->pv_prot &= prot;
1083 pv_update(pv);
1084 if (pv->pv_pmap->pm_flags & PM_ACTIVE)
1085 pmap_page_activate(pv);
1086 }
1087 }
1088 splx(s);
1089 }
1090
1091 void
1092 pmap_unwire(pmap_t pmap, vaddr_t va)
1093 {
1094 struct pv_entry *pv;
1095 intptr_t lpn;
1096
1097 thunk_printf_debug("pmap_unwire called va = %p\n", (void *) va);
1098 if (pmap == NULL)
1099 return;
1100
1101 lpn = atop(va - VM_MIN_ADDRESS); /* V->L */
1102 pv = pmap_lookup_pv(pmap, lpn);
1103 if (pv == NULL)
1104 return;
1105 /* but is it wired? */
1106 if ((pv->pv_vflags & PV_WIRED) == 0)
1107 return;
1108 pmap->pm_stats.wired_count--;
1109 pv->pv_vflags &= ~PV_WIRED;
1110
1111 /* XXX needed? */
1112 pmap_update_page(pv->pv_ppn);
1113 }
1114
1115 bool
1116 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *ppa)
1117 {
1118 struct pv_entry *pv;
1119 intptr_t lpn;
1120
1121 thunk_printf_debug("pmap_extract: extracting va %p\n", (void *) va);
1122 #ifdef DIAGNOSTIC
1123 if ((va < VM_MIN_ADDRESS) || (va > VM_MAX_KERNEL_ADDRESS)) {
1124 thunk_printf_debug("pmap_extract: invalid va issued\n");
1125 thunk_printf("%p not in [%p, %p]\n", (void *) va,
1126 (void *) VM_MIN_ADDRESS, (void *) VM_MAX_KERNEL_ADDRESS);
1127 return false;
1128 }
1129 #endif
1130 lpn = atop(va - VM_MIN_ADDRESS); /* V->L */
1131 pv = pmap_lookup_pv(pmap, lpn);
1132
1133 if (pv == NULL)
1134 return false;
1135 if (ppa)
1136 *ppa = ptoa(pv->pv_ppn);
1137 return true;
1138 }
1139
1140 /*
1141 * Enter an unmanaged, `wired' kernel mapping.
1142 * Only to be removed by pmap_kremove()
1143 */
1144 void
1145 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1146 {
1147 thunk_printf_debug("pmap_kenter_pa : v %p, p %p, prot %d, flags %d\n",
1148 (void *) va, (void *) pa, (int) prot, (int) flags);
1149 pmap_do_enter(pmap_kernel(), va, pa, prot, prot | PMAP_WIRED, 1);
1150 }
1151
1152 void
1153 pmap_kremove(vaddr_t va, vsize_t size)
1154 {
1155 pmap_remove(pmap_kernel(), va, va + size);
1156 }
1157
1158 void
1159 pmap_copy(pmap_t dst_map, pmap_t src_map, vaddr_t dst_addr, vsize_t len,
1160 vaddr_t src_addr)
1161 {
1162 thunk_printf_debug("pmap_copy (dummy)\n");
1163 }
1164
1165 void
1166 pmap_update(pmap_t pmap)
1167 {
1168 thunk_printf_debug("pmap_update (dummy)\n");
1169 }
1170
1171 void
1172 pmap_activate(struct lwp *l)
1173 {
1174 struct proc *p = l->l_proc;
1175 pmap_t pmap;
1176
1177 pmap = p->p_vmspace->vm_map.pmap;
1178 thunk_printf_debug("pmap_activate for lwp %p, pmap = %p\n", l, pmap);
1179
1180 if (pmap == pmap_kernel())
1181 return; /* kernel pmap is always active */
1182
1183 KASSERT(active_pmap == NULL);
1184 KASSERT((pmap->pm_flags & PM_ACTIVE) == 0);
1185
1186 active_pmap = pmap;
1187 pmap->pm_flags |= PM_ACTIVE;
1188 }
1189
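/*
 * On a switch away from a pmap, walk its two-level table and deactivate
 * every mapping it contains; all pmaps share the single host address
 * space, so the outgoing process's pages must not stay accessible.
 */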
1190 void
1191 pmap_deactivate(struct lwp *l)
1192 {
1193 struct proc *p = l->l_proc;
1194 struct pv_entry *pv;
1195 struct pmap_l2 *l2tbl;
1196 pmap_t pmap;
1197 int l1, l2;
1198
1199 pmap = p->p_vmspace->vm_map.pmap;
1200 thunk_printf_debug("pmap_DEactivate for lwp %p, pmap = %p\n", l, pmap);
1201
1202 if (pmap == pmap_kernel())
1203 return; /* kernel pmap is always active */
1204
1205 KASSERT(pmap == active_pmap);
1206 KASSERT(pmap->pm_flags & PM_ACTIVE);
1207
1208 active_pmap = NULL;
1209 pmap->pm_flags &=~ PM_ACTIVE;
1210
1211 for (l1 = 0; l1 < pm_nl1; l1++) {
1212 l2tbl = pmap->pm_l1[l1];
1213 if (!l2tbl)
1214 continue;
1215 for (l2 = 0; l2 < PMAP_L2_NENTRY; l2++) {
1216 pv = l2tbl->pm_l2[l2];
1217 if (pv) {
1218 pmap_page_deactivate(pv);
1219 // MEMC_WRITE(pmap->pm_entries[i]->pv_deactivate);
1220 }
1221 }
1222 }
1223
1224 /* dummy */
1225 // cpu_cache_flush();
1226 }
1227
1228 void
1229 pmap_zero_page(paddr_t pa)
1230 {
1231 char *blob;
1232
1233 thunk_printf_debug("pmap_zero_page: pa %p\n", (void *) pa);
1234
1235 if (pa & (PAGE_SIZE-1))
1236 panic("%s: unaligned address passed : %p\n", __func__, (void *) pa);
1237
1238 blob = thunk_mmap(pm_tmp_p0, PAGE_SIZE,
1239 THUNK_PROT_READ | THUNK_PROT_WRITE,
1240 THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
1241 mem_fh, pa);
1242 if (blob != pm_tmp_p0)
1243 panic("%s: couldn't get mapping", __func__);
1244
1245 memset(blob, 0, PAGE_SIZE);
1246
1247 thunk_munmap(blob, PAGE_SIZE);
1248 }
1249
1250 void
1251 pmap_copy_page(paddr_t src_pa, paddr_t dst_pa)
1252 {
1253 char *sblob, *dblob;
1254
1255 if (src_pa & (PAGE_SIZE-1))
1256 panic("%s: unaligned address passed : %p\n", __func__, (void *) src_pa);
1257 if (dst_pa & (PAGE_SIZE-1))
1258 panic("%s: unaligned address passed : %p\n", __func__, (void *) dst_pa);
1259
1260 thunk_printf_debug("pmap_copy_page: pa src %p, pa dst %p\n",
1261 (void *) src_pa, (void *) dst_pa);
1262
1263 /* source */
1264 sblob = thunk_mmap(pm_tmp_p0, PAGE_SIZE,
1265 THUNK_PROT_READ,
1266 THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
1267 mem_fh, src_pa);
1268 if (sblob != pm_tmp_p0)
1269 panic("%s: couldn't get src mapping", __func__);
1270
1271 /* destination */
1272 dblob = thunk_mmap(pm_tmp_p1, PAGE_SIZE,
1273 THUNK_PROT_READ | THUNK_PROT_WRITE,
1274 THUNK_MAP_FILE | THUNK_MAP_FIXED | THUNK_MAP_SHARED,
1275 mem_fh, dst_pa);
1276 if (dblob != pm_tmp_p1)
1277 panic("%s: couldn't get dst mapping", __func__);
1278
1279 memcpy(dblob, sblob, PAGE_SIZE);
1280
1281 thunk_munmap(sblob, PAGE_SIZE);
1282 thunk_munmap(dblob, PAGE_SIZE);
1283 }
1284
1285 /* change access permissions on a given physical page */
1286 void
1287 pmap_page_protect(struct vm_page *page, vm_prot_t prot)
1288 {
1289 intptr_t ppn;
1290 struct pv_entry *pv, *npv;
1291
1292 ppn = atop(VM_PAGE_TO_PHYS(page));
1293 thunk_printf_debug("pmap_page_protect page %"PRIiPTR" to prot %d\n", ppn, prot);
1294
1295 if (prot == VM_PROT_NONE) {
1296 /* visit all mappings */
1297 npv = pv = &pv_table[ppn];
1298 while ((pv != NULL) && (pv->pv_pmap != NULL)) {
1299 /* skip unmanaged entries */
1300 if (pv->pv_vflags & PV_UNMANAGED) {
1301 pv = pv->pv_next;
1302 continue;
1303 }
1304
1305 /* if in an active pmap deactivate */
1306 if (pv->pv_pmap->pm_flags & PM_ACTIVE)
1307 pmap_page_deactivate(pv);
1308
1309 /* if not on the head, remember our next */
1310 if (pv != &pv_table[ppn])
1311 npv = pv->pv_next;
1312
1313 /* remove from pmap */
1314 pmap_set_pv(pv->pv_pmap, pv->pv_lpn, NULL);
1315 if (pv->pv_vflags & PV_WIRED)
1316 pv->pv_pmap->pm_stats.wired_count--;
1317 pv_release(pv->pv_pmap, ppn, pv->pv_lpn);
1318
1319 pv = npv;
1320 }
1321 } else if (prot != VM_PROT_ALL) {
1322 /* visit all mappings */
1323 for (pv = &pv_table[ppn]; pv != NULL; pv = pv->pv_next) {
1324 /* if managed and in a pmap restrict access */
1325 if ((pv->pv_pmap != NULL) &&
1326 ((pv->pv_vflags & PV_UNMANAGED) == 0)) {
1327 pv->pv_prot &= prot;
1328 pv_update(pv);
1329 /* if in active pmap (re)activate page */
1330 if (pv->pv_pmap->pm_flags & PM_ACTIVE)
1331 pmap_page_activate(pv);
1332 }
1333 }
1334 }
1335 }
1336
1337 bool
1338 pmap_clear_modify(struct vm_page *page)
1339 {
1340 struct pv_entry *pv;
1341 uintptr_t ppn;
1342 bool rv;
1343
1344 ppn = atop(VM_PAGE_TO_PHYS(page));
1345 rv = pmap_is_modified(page);
1346
1347 thunk_printf_debug("pmap_clear_modify page %"PRIiPTR"\n", ppn);
1348
1349 /* if marked modified, clear it in all the pmap's referencing it */
1350 if (rv) {
1351 /* if it's marked modified in a kernel mapping, don't clear it */
1352 for (pv = &pv_table[ppn]; pv != NULL; pv = pv->pv_next)
1353 if (pv->pv_pmap == pmap_kernel() &&
1354 (pv->pv_prot & VM_PROT_WRITE))
1355 return rv;
1356 /* clear it */
1357 pv_table[ppn].pv_pflags &= ~PV_MODIFIED;
1358 pmap_update_page(ppn);
1359 }
1360 return rv;
1361 }
1362
1363 bool
1364 pmap_clear_reference(struct vm_page *page)
1365 {
1366 uintptr_t ppn;
1367 bool rv;
1368
1369 ppn = atop(VM_PAGE_TO_PHYS(page));
1370 rv = pmap_is_referenced(page);
1371
1372 thunk_printf_debug("pmap_clear_reference page %"PRIiPTR"\n", ppn);
1373
1374 if (rv) {
1375 pv_table[ppn].pv_pflags &= ~PV_REFERENCED;
1376 pmap_update_page(ppn);
1377 }
1378 return rv;
1379 }
1380
1381 bool
1382 pmap_is_modified(struct vm_page *page)
1383 {
1384 intptr_t ppn;
1385 bool rv;
1386
1387 ppn = atop(VM_PAGE_TO_PHYS(page));
1388 rv = (pv_table[ppn].pv_pflags & PV_MODIFIED) != 0;
1389
1390 thunk_printf_debug("pmap_is_modified page %"PRIiPTR" : %s\n", ppn, rv?"yes":"no");
1391
1392 return rv;
1393 }
1394
1395 bool
1396 pmap_is_referenced(struct vm_page *page)
1397 {
1398 intptr_t ppn;
1399
1400 ppn = atop(VM_PAGE_TO_PHYS(page));
1401 thunk_printf_debug("pmap_is_referenced page %"PRIiPTR"\n", ppn);
1402
1403 return (pv_table[ppn].pv_pflags & PV_REFERENCED) != 0;
1404 }
1405
1406 paddr_t
1407 pmap_phys_address(paddr_t cookie)
1408 {
1409 return ptoa(cookie);
1410 }
1411
1412 vaddr_t
1413 pmap_growkernel(vaddr_t maxkvaddr)
1414 {
1415 thunk_printf_debug("pmap_growkernel: till %p (adding %"PRIu64" KB)\n",
1416 (void *) maxkvaddr,
1417 (uint64_t) (maxkvaddr - kmem_kvm_cur_end)/1024);
1418 if (maxkvaddr > kmem_kvm_end)
1419 return kmem_kvm_end;
1420 kmem_kvm_cur_end = maxkvaddr;
1421 return kmem_kvm_cur_end;
1422 }
1423
1424