1433d6423SLionel Sambuc 2433d6423SLionel Sambuc #define _SYSTEM 1 3433d6423SLionel Sambuc 4433d6423SLionel Sambuc #include <minix/callnr.h> 5433d6423SLionel Sambuc #include <minix/com.h> 6433d6423SLionel Sambuc #include <minix/config.h> 7433d6423SLionel Sambuc #include <minix/const.h> 8433d6423SLionel Sambuc #include <minix/ds.h> 9433d6423SLionel Sambuc #include <minix/endpoint.h> 10433d6423SLionel Sambuc #include <minix/minlib.h> 11433d6423SLionel Sambuc #include <minix/type.h> 12433d6423SLionel Sambuc #include <minix/ipc.h> 13433d6423SLionel Sambuc #include <minix/sysutil.h> 14433d6423SLionel Sambuc #include <minix/syslib.h> 15433d6423SLionel Sambuc #include <minix/safecopies.h> 16433d6423SLionel Sambuc #include <minix/cpufeature.h> 17433d6423SLionel Sambuc #include <minix/bitmap.h> 18433d6423SLionel Sambuc #include <minix/debug.h> 19433d6423SLionel Sambuc 20433d6423SLionel Sambuc #include <errno.h> 21433d6423SLionel Sambuc #include <stdlib.h> 22433d6423SLionel Sambuc #include <assert.h> 23433d6423SLionel Sambuc #include <string.h> 24433d6423SLionel Sambuc #include <env.h> 25433d6423SLionel Sambuc #include <stdio.h> 26433d6423SLionel Sambuc #include <fcntl.h> 27433d6423SLionel Sambuc #include <stdlib.h> 28433d6423SLionel Sambuc 29433d6423SLionel Sambuc #include "proto.h" 30433d6423SLionel Sambuc #include "glo.h" 31433d6423SLionel Sambuc #include "util.h" 32433d6423SLionel Sambuc #include "vm.h" 33433d6423SLionel Sambuc #include "sanitycheck.h" 34433d6423SLionel Sambuc 35433d6423SLionel Sambuc static int vm_self_pages; 36433d6423SLionel Sambuc 37433d6423SLionel Sambuc /* PDE used to map in kernel, kernel physical address. */ 38433d6423SLionel Sambuc #define MAX_PAGEDIR_PDES 5 39433d6423SLionel Sambuc static struct pdm { 40433d6423SLionel Sambuc int pdeno; 41433d6423SLionel Sambuc u32_t val; 42433d6423SLionel Sambuc phys_bytes phys; 43433d6423SLionel Sambuc u32_t *page_directories; 44433d6423SLionel Sambuc } pagedir_mappings[MAX_PAGEDIR_PDES]; 45433d6423SLionel Sambuc 46433d6423SLionel Sambuc static multiboot_module_t *kern_mb_mod = NULL; 47433d6423SLionel Sambuc static size_t kern_size = 0; 48433d6423SLionel Sambuc static int kern_start_pde = -1; 49433d6423SLionel Sambuc 50433d6423SLionel Sambuc /* big page size available in hardware? */ 51433d6423SLionel Sambuc static int bigpage_ok = 1; 52433d6423SLionel Sambuc 53433d6423SLionel Sambuc /* Our process table entry. */ 54433d6423SLionel Sambuc struct vmproc *vmprocess = &vmproc[VM_PROC_NR]; 55433d6423SLionel Sambuc 56433d6423SLionel Sambuc /* Spare memory, ready to go after initialization, to avoid a 57433d6423SLionel Sambuc * circular dependency on allocating memory and writing it into VM's 58433d6423SLionel Sambuc * page table. 
59433d6423SLionel Sambuc */ 60433d6423SLionel Sambuc #if SANITYCHECKS 61433d6423SLionel Sambuc #define SPAREPAGES 200 62433d6423SLionel Sambuc #define STATIC_SPAREPAGES 190 63433d6423SLionel Sambuc #else 64433d6423SLionel Sambuc #ifdef __arm__ 65433d6423SLionel Sambuc # define SPAREPAGES 150 66433d6423SLionel Sambuc # define STATIC_SPAREPAGES 140 67433d6423SLionel Sambuc #else 68433d6423SLionel Sambuc # define SPAREPAGES 20 69433d6423SLionel Sambuc # define STATIC_SPAREPAGES 15 70433d6423SLionel Sambuc #endif /* __arm__ */ 71433d6423SLionel Sambuc #endif 72433d6423SLionel Sambuc 73433d6423SLionel Sambuc #ifdef __i386__ 74433d6423SLionel Sambuc static u32_t global_bit = 0; 75433d6423SLionel Sambuc #endif 76433d6423SLionel Sambuc 77433d6423SLionel Sambuc #define SPAREPAGEDIRS 1 78433d6423SLionel Sambuc #define STATIC_SPAREPAGEDIRS 1 79433d6423SLionel Sambuc 80433d6423SLionel Sambuc int missing_sparedirs = SPAREPAGEDIRS; 81433d6423SLionel Sambuc static struct { 82433d6423SLionel Sambuc void *pagedir; 83433d6423SLionel Sambuc phys_bytes phys; 84433d6423SLionel Sambuc } sparepagedirs[SPAREPAGEDIRS]; 85433d6423SLionel Sambuc 8663483e02SCristiano Giuffrida #define is_staticaddr(v) ((vir_bytes) (v) < VM_OWN_HEAPSTART) 87433d6423SLionel Sambuc 88433d6423SLionel Sambuc #define MAX_KERNMAPPINGS 10 89433d6423SLionel Sambuc static struct { 90433d6423SLionel Sambuc phys_bytes phys_addr; /* Physical addr. */ 91433d6423SLionel Sambuc phys_bytes len; /* Length in bytes. */ 92433d6423SLionel Sambuc vir_bytes vir_addr; /* Offset in page table. */ 93433d6423SLionel Sambuc int flags; 94433d6423SLionel Sambuc } kern_mappings[MAX_KERNMAPPINGS]; 95433d6423SLionel Sambuc int kernmappings = 0; 96433d6423SLionel Sambuc 97433d6423SLionel Sambuc /* Clicks must be pages, as 98433d6423SLionel Sambuc * - they must be page aligned to map them 99433d6423SLionel Sambuc * - they must be a multiple of the page size 100433d6423SLionel Sambuc * - it's inconvenient to have them bigger than pages, because we often want 101433d6423SLionel Sambuc * just one page 102433d6423SLionel Sambuc * May as well require them to be equal then. 103433d6423SLionel Sambuc */ 104433d6423SLionel Sambuc #if CLICK_SIZE != VM_PAGE_SIZE 105433d6423SLionel Sambuc #error CLICK_SIZE must be page size. 
106433d6423SLionel Sambuc #endif 107433d6423SLionel Sambuc 108433d6423SLionel Sambuc static void *spare_pagequeue; 109433d6423SLionel Sambuc static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES] 110433d6423SLionel Sambuc __aligned(VM_PAGE_SIZE); 111433d6423SLionel Sambuc 112433d6423SLionel Sambuc #if defined(__arm__) 113433d6423SLionel Sambuc static char static_sparepagedirs[ARCH_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARCH_PAGEDIR_SIZE] __aligned(ARCH_PAGEDIR_SIZE); 114433d6423SLionel Sambuc #endif 115433d6423SLionel Sambuc 116*10e6ba68SBen Gras void pt_assert(pt_t *pt) 117*10e6ba68SBen Gras { 118*10e6ba68SBen Gras char dir[4096]; 119*10e6ba68SBen Gras pt_clearmapcache(); 120*10e6ba68SBen Gras if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) { 121*10e6ba68SBen Gras panic("VMCTL_FLUSHTLB failed"); 122*10e6ba68SBen Gras } 123*10e6ba68SBen Gras sys_physcopy(NONE, pt->pt_dir_phys, SELF, (vir_bytes) dir, sizeof(dir), 0); 124*10e6ba68SBen Gras assert(!memcmp(dir, pt->pt_dir, sizeof(dir))); 125*10e6ba68SBen Gras } 126*10e6ba68SBen Gras 127433d6423SLionel Sambuc #if SANITYCHECKS 128433d6423SLionel Sambuc /*===========================================================================* 129433d6423SLionel Sambuc * pt_sanitycheck * 130433d6423SLionel Sambuc *===========================================================================*/ 131433d6423SLionel Sambuc void pt_sanitycheck(pt_t *pt, const char *file, int line) 132433d6423SLionel Sambuc { 133433d6423SLionel Sambuc /* Basic pt sanity check. */ 134433d6423SLionel Sambuc int slot; 135433d6423SLionel Sambuc 136433d6423SLionel Sambuc MYASSERT(pt); 137433d6423SLionel Sambuc MYASSERT(pt->pt_dir); 138433d6423SLionel Sambuc MYASSERT(pt->pt_dir_phys); 139433d6423SLionel Sambuc 140433d6423SLionel Sambuc for(slot = 0; slot < ELEMENTS(vmproc); slot++) { 141433d6423SLionel Sambuc if(pt == &vmproc[slot].vm_pt) 142433d6423SLionel Sambuc break; 143433d6423SLionel Sambuc } 144433d6423SLionel Sambuc 145433d6423SLionel Sambuc if(slot >= ELEMENTS(vmproc)) { 146433d6423SLionel Sambuc panic("pt_sanitycheck: passed pt not in any proc"); 147433d6423SLionel Sambuc } 148433d6423SLionel Sambuc 149433d6423SLionel Sambuc MYASSERT(usedpages_add(pt->pt_dir_phys, VM_PAGE_SIZE) == OK); 150433d6423SLionel Sambuc } 151433d6423SLionel Sambuc #endif 152433d6423SLionel Sambuc 153433d6423SLionel Sambuc /*===========================================================================* 154433d6423SLionel Sambuc * findhole * 155433d6423SLionel Sambuc *===========================================================================*/ 156433d6423SLionel Sambuc static u32_t findhole(int pages) 157433d6423SLionel Sambuc { 158433d6423SLionel Sambuc /* Find a space in the virtual address space of VM. */ 159433d6423SLionel Sambuc u32_t curv; 160433d6423SLionel Sambuc int pde = 0, try_restart; 16150b7f13fSCristiano Giuffrida static void *lastv = 0; 162433d6423SLionel Sambuc pt_t *pt = &vmprocess->vm_pt; 163433d6423SLionel Sambuc vir_bytes vmin, vmax; 164433d6423SLionel Sambuc u32_t holev = NO_MEM; 165433d6423SLionel Sambuc int holesize = -1; 166433d6423SLionel Sambuc 16763483e02SCristiano Giuffrida vmin = VM_OWN_MMAPBASE; 16863483e02SCristiano Giuffrida vmax = VM_OWN_MMAPTOP; 169433d6423SLionel Sambuc 170433d6423SLionel Sambuc /* Input sanity check. 
*/ 171433d6423SLionel Sambuc assert(vmin + VM_PAGE_SIZE >= vmin); 172433d6423SLionel Sambuc assert(vmax >= vmin + VM_PAGE_SIZE); 173433d6423SLionel Sambuc assert((vmin % VM_PAGE_SIZE) == 0); 174433d6423SLionel Sambuc assert((vmax % VM_PAGE_SIZE) == 0); 175433d6423SLionel Sambuc assert(pages > 0); 176433d6423SLionel Sambuc 17750b7f13fSCristiano Giuffrida curv = (u32_t) lastv; 178433d6423SLionel Sambuc if(curv < vmin || curv >= vmax) 179433d6423SLionel Sambuc curv = vmin; 180433d6423SLionel Sambuc 181433d6423SLionel Sambuc try_restart = 1; 182433d6423SLionel Sambuc 183433d6423SLionel Sambuc /* Start looking for a free page starting at vmin. */ 184433d6423SLionel Sambuc while(curv < vmax) { 185433d6423SLionel Sambuc int pte; 186433d6423SLionel Sambuc 187433d6423SLionel Sambuc assert(curv >= vmin); 188433d6423SLionel Sambuc assert(curv < vmax); 189433d6423SLionel Sambuc 190433d6423SLionel Sambuc pde = ARCH_VM_PDE(curv); 191433d6423SLionel Sambuc pte = ARCH_VM_PTE(curv); 192433d6423SLionel Sambuc 193433d6423SLionel Sambuc if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && 194433d6423SLionel Sambuc (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) { 195433d6423SLionel Sambuc /* there is a page here - so keep looking for holes */ 196433d6423SLionel Sambuc holev = NO_MEM; 197433d6423SLionel Sambuc holesize = 0; 198433d6423SLionel Sambuc } else { 199433d6423SLionel Sambuc /* there is no page here - so we have a hole, a bigger 200433d6423SLionel Sambuc * one if we already had one 201433d6423SLionel Sambuc */ 202433d6423SLionel Sambuc if(holev == NO_MEM) { 203433d6423SLionel Sambuc holev = curv; 204433d6423SLionel Sambuc holesize = 1; 205433d6423SLionel Sambuc } else holesize++; 206433d6423SLionel Sambuc 207433d6423SLionel Sambuc assert(holesize > 0); 208433d6423SLionel Sambuc assert(holesize <= pages); 209433d6423SLionel Sambuc 210433d6423SLionel Sambuc /* if it's big enough, return it */ 211433d6423SLionel Sambuc if(holesize == pages) { 21250b7f13fSCristiano Giuffrida lastv = (void*) (curv + VM_PAGE_SIZE); 213433d6423SLionel Sambuc return holev; 214433d6423SLionel Sambuc } 215433d6423SLionel Sambuc } 216433d6423SLionel Sambuc 217433d6423SLionel Sambuc curv+=VM_PAGE_SIZE; 218433d6423SLionel Sambuc 219433d6423SLionel Sambuc /* if we reached the limit, start scanning from the beginning if 220433d6423SLionel Sambuc * we haven't looked there yet 221433d6423SLionel Sambuc */ 222433d6423SLionel Sambuc if(curv >= vmax && try_restart) { 223433d6423SLionel Sambuc try_restart = 0; 224433d6423SLionel Sambuc curv = vmin; 225433d6423SLionel Sambuc } 226433d6423SLionel Sambuc } 227433d6423SLionel Sambuc 228433d6423SLionel Sambuc printf("VM: out of virtual address space in vm\n"); 229433d6423SLionel Sambuc 230433d6423SLionel Sambuc return NO_MEM; 231433d6423SLionel Sambuc } 232433d6423SLionel Sambuc 233433d6423SLionel Sambuc /*===========================================================================* 234433d6423SLionel Sambuc * vm_freepages * 235433d6423SLionel Sambuc *===========================================================================*/ 236433d6423SLionel Sambuc void vm_freepages(vir_bytes vir, int pages) 237433d6423SLionel Sambuc { 238433d6423SLionel Sambuc assert(!(vir % VM_PAGE_SIZE)); 239433d6423SLionel Sambuc 240433d6423SLionel Sambuc if(is_staticaddr(vir)) { 241433d6423SLionel Sambuc printf("VM: not freeing static page\n"); 242433d6423SLionel Sambuc return; 243433d6423SLionel Sambuc } 244433d6423SLionel Sambuc 245433d6423SLionel Sambuc if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir, 246433d6423SLionel 
Sambuc MAP_NONE, pages*VM_PAGE_SIZE, 0, 247433d6423SLionel Sambuc WMF_OVERWRITE | WMF_FREE) != OK) 248433d6423SLionel Sambuc panic("vm_freepages: pt_writemap failed"); 249433d6423SLionel Sambuc 250433d6423SLionel Sambuc vm_self_pages--; 251433d6423SLionel Sambuc 252433d6423SLionel Sambuc #if SANITYCHECKS 253433d6423SLionel Sambuc /* If SANITYCHECKS are on, flush tlb so accessing freed pages is 254433d6423SLionel Sambuc * always trapped, also if not in tlb. 255433d6423SLionel Sambuc */ 256433d6423SLionel Sambuc if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) { 257433d6423SLionel Sambuc panic("VMCTL_FLUSHTLB failed"); 258433d6423SLionel Sambuc } 259433d6423SLionel Sambuc #endif 260433d6423SLionel Sambuc } 261433d6423SLionel Sambuc 262433d6423SLionel Sambuc /*===========================================================================* 263433d6423SLionel Sambuc * vm_getsparepage * 264433d6423SLionel Sambuc *===========================================================================*/ 265433d6423SLionel Sambuc static void *vm_getsparepage(phys_bytes *phys) 266433d6423SLionel Sambuc { 267433d6423SLionel Sambuc void *ptr; 268433d6423SLionel Sambuc if(reservedqueue_alloc(spare_pagequeue, phys, &ptr) != OK) { 269433d6423SLionel Sambuc return NULL; 270433d6423SLionel Sambuc } 271433d6423SLionel Sambuc assert(ptr); 272433d6423SLionel Sambuc return ptr; 273433d6423SLionel Sambuc } 274433d6423SLionel Sambuc 275433d6423SLionel Sambuc /*===========================================================================* 276433d6423SLionel Sambuc * vm_getsparepagedir * 277433d6423SLionel Sambuc *===========================================================================*/ 278433d6423SLionel Sambuc static void *vm_getsparepagedir(phys_bytes *phys) 279433d6423SLionel Sambuc { 280433d6423SLionel Sambuc int s; 281433d6423SLionel Sambuc assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS); 282433d6423SLionel Sambuc for(s = 0; s < SPAREPAGEDIRS; s++) { 283433d6423SLionel Sambuc if(sparepagedirs[s].pagedir) { 284433d6423SLionel Sambuc void *sp; 285433d6423SLionel Sambuc sp = sparepagedirs[s].pagedir; 286433d6423SLionel Sambuc *phys = sparepagedirs[s].phys; 287433d6423SLionel Sambuc sparepagedirs[s].pagedir = NULL; 288433d6423SLionel Sambuc missing_sparedirs++; 289433d6423SLionel Sambuc assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS); 290433d6423SLionel Sambuc return sp; 291433d6423SLionel Sambuc } 292433d6423SLionel Sambuc } 293433d6423SLionel Sambuc return NULL; 294433d6423SLionel Sambuc } 295433d6423SLionel Sambuc 296433d6423SLionel Sambuc void *vm_mappages(phys_bytes p, int pages) 297433d6423SLionel Sambuc { 298433d6423SLionel Sambuc vir_bytes loc; 299433d6423SLionel Sambuc int r; 300433d6423SLionel Sambuc pt_t *pt = &vmprocess->vm_pt; 301433d6423SLionel Sambuc 302433d6423SLionel Sambuc /* Where in our virtual address space can we put it? */ 303433d6423SLionel Sambuc loc = findhole(pages); 304433d6423SLionel Sambuc if(loc == NO_MEM) { 305433d6423SLionel Sambuc printf("vm_mappages: findhole failed\n"); 306433d6423SLionel Sambuc return NULL; 307433d6423SLionel Sambuc } 308433d6423SLionel Sambuc 309433d6423SLionel Sambuc /* Map this page into our address space. 
*/ 310433d6423SLionel Sambuc if((r=pt_writemap(vmprocess, pt, loc, p, VM_PAGE_SIZE*pages, 311433d6423SLionel Sambuc ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW 312433d6423SLionel Sambuc #if defined(__arm__) 313433d6423SLionel Sambuc | ARM_VM_PTE_CACHED 314433d6423SLionel Sambuc #endif 315433d6423SLionel Sambuc , 0)) != OK) { 316433d6423SLionel Sambuc printf("vm_mappages writemap failed\n"); 317433d6423SLionel Sambuc return NULL; 318433d6423SLionel Sambuc } 319433d6423SLionel Sambuc 320433d6423SLionel Sambuc if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) { 321433d6423SLionel Sambuc panic("VMCTL_FLUSHTLB failed: %d", r); 322433d6423SLionel Sambuc } 323433d6423SLionel Sambuc 324433d6423SLionel Sambuc assert(loc); 325433d6423SLionel Sambuc 326433d6423SLionel Sambuc return (void *) loc; 327433d6423SLionel Sambuc } 328433d6423SLionel Sambuc 329433d6423SLionel Sambuc static int pt_init_done; 330433d6423SLionel Sambuc 331433d6423SLionel Sambuc /*===========================================================================* 332433d6423SLionel Sambuc * vm_allocpage * 333433d6423SLionel Sambuc *===========================================================================*/ 334433d6423SLionel Sambuc void *vm_allocpages(phys_bytes *phys, int reason, int pages) 335433d6423SLionel Sambuc { 336433d6423SLionel Sambuc /* Allocate a page for use by VM itself. */ 337433d6423SLionel Sambuc phys_bytes newpage; 338433d6423SLionel Sambuc static int level = 0; 339433d6423SLionel Sambuc void *ret; 340433d6423SLionel Sambuc u32_t mem_flags = 0; 341433d6423SLionel Sambuc 342433d6423SLionel Sambuc assert(reason >= 0 && reason < VMP_CATEGORIES); 343433d6423SLionel Sambuc 344433d6423SLionel Sambuc assert(pages > 0); 345433d6423SLionel Sambuc 346433d6423SLionel Sambuc level++; 347433d6423SLionel Sambuc 348433d6423SLionel Sambuc assert(level >= 1); 349433d6423SLionel Sambuc assert(level <= 2); 350433d6423SLionel Sambuc 351433d6423SLionel Sambuc if((level > 1) || !pt_init_done) { 352433d6423SLionel Sambuc void *s; 353433d6423SLionel Sambuc 354433d6423SLionel Sambuc if(pages == 1) s=vm_getsparepage(phys); 355433d6423SLionel Sambuc else if(pages == 4) s=vm_getsparepagedir(phys); 356433d6423SLionel Sambuc else panic("%d pages", pages); 357433d6423SLionel Sambuc 358433d6423SLionel Sambuc level--; 359433d6423SLionel Sambuc if(!s) { 360433d6423SLionel Sambuc util_stacktrace(); 361433d6423SLionel Sambuc printf("VM: warning: out of spare pages\n"); 362433d6423SLionel Sambuc } 363433d6423SLionel Sambuc if(!is_staticaddr(s)) vm_self_pages++; 364433d6423SLionel Sambuc return s; 365433d6423SLionel Sambuc } 366433d6423SLionel Sambuc 367433d6423SLionel Sambuc #if defined(__arm__) 368433d6423SLionel Sambuc if (reason == VMP_PAGEDIR) { 369433d6423SLionel Sambuc mem_flags |= PAF_ALIGN16K; 370433d6423SLionel Sambuc } 371433d6423SLionel Sambuc #endif 372433d6423SLionel Sambuc 373433d6423SLionel Sambuc /* Allocate page of memory for use by VM. As VM 374433d6423SLionel Sambuc * is trusted, we don't have to pre-clear it. 
375433d6423SLionel Sambuc */ 376433d6423SLionel Sambuc if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) { 377433d6423SLionel Sambuc level--; 378433d6423SLionel Sambuc printf("VM: vm_allocpage: alloc_mem failed\n"); 379433d6423SLionel Sambuc return NULL; 380433d6423SLionel Sambuc } 381433d6423SLionel Sambuc 382433d6423SLionel Sambuc *phys = CLICK2ABS(newpage); 383433d6423SLionel Sambuc 384433d6423SLionel Sambuc if(!(ret = vm_mappages(*phys, pages))) { 385433d6423SLionel Sambuc level--; 386433d6423SLionel Sambuc printf("VM: vm_allocpage: vm_mappages failed\n"); 387433d6423SLionel Sambuc return NULL; 388433d6423SLionel Sambuc } 389433d6423SLionel Sambuc 390433d6423SLionel Sambuc level--; 391433d6423SLionel Sambuc vm_self_pages++; 392433d6423SLionel Sambuc 393433d6423SLionel Sambuc return ret; 394433d6423SLionel Sambuc } 395433d6423SLionel Sambuc 396433d6423SLionel Sambuc void *vm_allocpage(phys_bytes *phys, int reason) 397433d6423SLionel Sambuc { 398433d6423SLionel Sambuc return vm_allocpages(phys, reason, 1); 399433d6423SLionel Sambuc } 400433d6423SLionel Sambuc 401433d6423SLionel Sambuc /*===========================================================================* 402433d6423SLionel Sambuc * vm_pagelock * 403433d6423SLionel Sambuc *===========================================================================*/ 404433d6423SLionel Sambuc void vm_pagelock(void *vir, int lockflag) 405433d6423SLionel Sambuc { 406433d6423SLionel Sambuc /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */ 407433d6423SLionel Sambuc vir_bytes m = (vir_bytes) vir; 408433d6423SLionel Sambuc int r; 409433d6423SLionel Sambuc u32_t flags = ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER; 410433d6423SLionel Sambuc pt_t *pt; 411433d6423SLionel Sambuc 412433d6423SLionel Sambuc pt = &vmprocess->vm_pt; 413433d6423SLionel Sambuc 414433d6423SLionel Sambuc assert(!(m % VM_PAGE_SIZE)); 415433d6423SLionel Sambuc 416433d6423SLionel Sambuc if(!lockflag) 417433d6423SLionel Sambuc flags |= ARCH_VM_PTE_RW; 418433d6423SLionel Sambuc #if defined(__arm__) 419433d6423SLionel Sambuc else 420433d6423SLionel Sambuc flags |= ARCH_VM_PTE_RO; 421433d6423SLionel Sambuc 422433d6423SLionel Sambuc flags |= ARM_VM_PTE_CACHED ; 423433d6423SLionel Sambuc #endif 424433d6423SLionel Sambuc 425433d6423SLionel Sambuc /* Update flags. 
*/ 426433d6423SLionel Sambuc if((r=pt_writemap(vmprocess, pt, m, 0, VM_PAGE_SIZE, 427433d6423SLionel Sambuc flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) { 428433d6423SLionel Sambuc panic("vm_lockpage: pt_writemap failed"); 429433d6423SLionel Sambuc } 430433d6423SLionel Sambuc 431433d6423SLionel Sambuc if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) { 432433d6423SLionel Sambuc panic("VMCTL_FLUSHTLB failed: %d", r); 433433d6423SLionel Sambuc } 434433d6423SLionel Sambuc 435433d6423SLionel Sambuc return; 436433d6423SLionel Sambuc } 437433d6423SLionel Sambuc 438433d6423SLionel Sambuc /*===========================================================================* 439433d6423SLionel Sambuc * vm_addrok * 440433d6423SLionel Sambuc *===========================================================================*/ 441433d6423SLionel Sambuc int vm_addrok(void *vir, int writeflag) 442433d6423SLionel Sambuc { 443433d6423SLionel Sambuc pt_t *pt = &vmprocess->vm_pt; 444433d6423SLionel Sambuc int pde, pte; 445433d6423SLionel Sambuc vir_bytes v = (vir_bytes) vir; 446433d6423SLionel Sambuc 447433d6423SLionel Sambuc pde = ARCH_VM_PDE(v); 448433d6423SLionel Sambuc pte = ARCH_VM_PTE(v); 449433d6423SLionel Sambuc 450433d6423SLionel Sambuc if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) { 451433d6423SLionel Sambuc printf("addr not ok: missing pde %d\n", pde); 452433d6423SLionel Sambuc return 0; 453433d6423SLionel Sambuc } 454433d6423SLionel Sambuc 455433d6423SLionel Sambuc #if defined(__i386__) 456433d6423SLionel Sambuc if(writeflag && 457433d6423SLionel Sambuc !(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) { 458433d6423SLionel Sambuc printf("addr not ok: pde %d present but pde unwritable\n", pde); 459433d6423SLionel Sambuc return 0; 460433d6423SLionel Sambuc } 461433d6423SLionel Sambuc #elif defined(__arm__) 462433d6423SLionel Sambuc if(writeflag && 463433d6423SLionel Sambuc (pt->pt_dir[pde] & ARCH_VM_PTE_RO)) { 464433d6423SLionel Sambuc printf("addr not ok: pde %d present but pde unwritable\n", pde); 465433d6423SLionel Sambuc return 0; 466433d6423SLionel Sambuc } 467433d6423SLionel Sambuc 468433d6423SLionel Sambuc #endif 469433d6423SLionel Sambuc if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) { 470433d6423SLionel Sambuc printf("addr not ok: missing pde %d / pte %d\n", 471433d6423SLionel Sambuc pde, pte); 472433d6423SLionel Sambuc return 0; 473433d6423SLionel Sambuc } 474433d6423SLionel Sambuc 475433d6423SLionel Sambuc #if defined(__i386__) 476433d6423SLionel Sambuc if(writeflag && 477433d6423SLionel Sambuc !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) { 478433d6423SLionel Sambuc printf("addr not ok: pde %d / pte %d present but unwritable\n", 479433d6423SLionel Sambuc pde, pte); 480433d6423SLionel Sambuc #elif defined(__arm__) 481433d6423SLionel Sambuc if(writeflag && 482433d6423SLionel Sambuc (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) { 483433d6423SLionel Sambuc printf("addr not ok: pde %d / pte %d present but unwritable\n", 484433d6423SLionel Sambuc pde, pte); 485433d6423SLionel Sambuc #endif 486433d6423SLionel Sambuc return 0; 487433d6423SLionel Sambuc } 488433d6423SLionel Sambuc 489433d6423SLionel Sambuc return 1; 490433d6423SLionel Sambuc } 491433d6423SLionel Sambuc 492433d6423SLionel Sambuc /*===========================================================================* 493433d6423SLionel Sambuc * pt_ptalloc * 494433d6423SLionel Sambuc *===========================================================================*/ 495433d6423SLionel Sambuc static int pt_ptalloc(pt_t *pt, int pde, u32_t flags) 496433d6423SLionel Sambuc { 
497433d6423SLionel Sambuc /* Allocate a page table and write its address into the page directory. */ 498433d6423SLionel Sambuc int i; 499433d6423SLionel Sambuc phys_bytes pt_phys; 500433d6423SLionel Sambuc u32_t *p; 501433d6423SLionel Sambuc 502433d6423SLionel Sambuc /* Argument must make sense. */ 503433d6423SLionel Sambuc assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES); 504433d6423SLionel Sambuc assert(!(flags & ~(PTF_ALLFLAGS))); 505433d6423SLionel Sambuc 506433d6423SLionel Sambuc /* We don't expect to overwrite page directory entry, nor 507433d6423SLionel Sambuc * storage for the page table. 508433d6423SLionel Sambuc */ 509433d6423SLionel Sambuc assert(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)); 510433d6423SLionel Sambuc assert(!pt->pt_pt[pde]); 511433d6423SLionel Sambuc 512433d6423SLionel Sambuc /* Get storage for the page table. The allocation call may in fact 513433d6423SLionel Sambuc * recursively create the directory entry as a side effect. In that 514433d6423SLionel Sambuc * case, we free the newly allocated page and do nothing else. 515433d6423SLionel Sambuc */ 516433d6423SLionel Sambuc if (!(p = vm_allocpage(&pt_phys, VMP_PAGETABLE))) 517433d6423SLionel Sambuc return ENOMEM; 518433d6423SLionel Sambuc if (pt->pt_pt[pde]) { 519433d6423SLionel Sambuc vm_freepages((vir_bytes) p, 1); 520433d6423SLionel Sambuc assert(pt->pt_pt[pde]); 521433d6423SLionel Sambuc return OK; 522433d6423SLionel Sambuc } 523433d6423SLionel Sambuc pt->pt_pt[pde] = p; 524433d6423SLionel Sambuc 525433d6423SLionel Sambuc for(i = 0; i < ARCH_VM_PT_ENTRIES; i++) 526433d6423SLionel Sambuc pt->pt_pt[pde][i] = 0; /* Empty entry. */ 527433d6423SLionel Sambuc 528433d6423SLionel Sambuc /* Make page directory entry. 529433d6423SLionel Sambuc * The PDE is always 'present,' 'writable,' and 'user accessible,' 530433d6423SLionel Sambuc * relying on the PTE for protection. 531433d6423SLionel Sambuc */ 532433d6423SLionel Sambuc #if defined(__i386__) 533433d6423SLionel Sambuc pt->pt_dir[pde] = (pt_phys & ARCH_VM_ADDR_MASK) | flags 534433d6423SLionel Sambuc | ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW; 535433d6423SLionel Sambuc #elif defined(__arm__) 536433d6423SLionel Sambuc pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK) 537433d6423SLionel Sambuc | ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; //LSC FIXME 538433d6423SLionel Sambuc #endif 539433d6423SLionel Sambuc 540433d6423SLionel Sambuc return OK; 541433d6423SLionel Sambuc } 542433d6423SLionel Sambuc 543433d6423SLionel Sambuc /*===========================================================================* 544433d6423SLionel Sambuc * pt_ptalloc_in_range * 545433d6423SLionel Sambuc *===========================================================================*/ 546433d6423SLionel Sambuc int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end, 547433d6423SLionel Sambuc u32_t flags, int verify) 548433d6423SLionel Sambuc { 549433d6423SLionel Sambuc /* Allocate all the page tables in the range specified. */ 550433d6423SLionel Sambuc int pde, first_pde, last_pde; 551433d6423SLionel Sambuc 552433d6423SLionel Sambuc first_pde = ARCH_VM_PDE(start); 553433d6423SLionel Sambuc last_pde = ARCH_VM_PDE(end-1); 554433d6423SLionel Sambuc 555433d6423SLionel Sambuc assert(first_pde >= 0); 556433d6423SLionel Sambuc assert(last_pde < ARCH_VM_DIR_ENTRIES); 557433d6423SLionel Sambuc 558433d6423SLionel Sambuc /* Scan all page-directory entries in the range. 
*/ 559433d6423SLionel Sambuc for(pde = first_pde; pde <= last_pde; pde++) { 560433d6423SLionel Sambuc assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE)); 561433d6423SLionel Sambuc if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) { 562433d6423SLionel Sambuc int r; 563433d6423SLionel Sambuc if(verify) { 564433d6423SLionel Sambuc printf("pt_ptalloc_in_range: no pde %d\n", pde); 565433d6423SLionel Sambuc return EFAULT; 566433d6423SLionel Sambuc } 567433d6423SLionel Sambuc assert(!pt->pt_dir[pde]); 568433d6423SLionel Sambuc if((r=pt_ptalloc(pt, pde, flags)) != OK) { 569433d6423SLionel Sambuc /* Couldn't do (complete) mapping. 570433d6423SLionel Sambuc * Don't bother freeing any previously 571433d6423SLionel Sambuc * allocated page tables, they're 572433d6423SLionel Sambuc * still writable, don't point to nonsense, 573433d6423SLionel Sambuc * and pt_ptalloc leaves the directory 574433d6423SLionel Sambuc * and other data in a consistent state. 575433d6423SLionel Sambuc */ 576433d6423SLionel Sambuc return r; 577433d6423SLionel Sambuc } 578433d6423SLionel Sambuc assert(pt->pt_pt[pde]); 579433d6423SLionel Sambuc } 580433d6423SLionel Sambuc assert(pt->pt_pt[pde]); 581433d6423SLionel Sambuc assert(pt->pt_dir[pde]); 582433d6423SLionel Sambuc assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT); 583433d6423SLionel Sambuc } 584433d6423SLionel Sambuc 585433d6423SLionel Sambuc return OK; 586433d6423SLionel Sambuc } 587433d6423SLionel Sambuc 588433d6423SLionel Sambuc static const char *ptestr(u32_t pte) 589433d6423SLionel Sambuc { 590433d6423SLionel Sambuc #define FLAG(constant, name) { \ 591433d6423SLionel Sambuc if(pte & (constant)) { strcat(str, name); strcat(str, " "); } \ 592433d6423SLionel Sambuc } 593433d6423SLionel Sambuc 594433d6423SLionel Sambuc static char str[30]; 595433d6423SLionel Sambuc if(!(pte & ARCH_VM_PTE_PRESENT)) { 596433d6423SLionel Sambuc return "not present"; 597433d6423SLionel Sambuc } 598433d6423SLionel Sambuc str[0] = '\0'; 599433d6423SLionel Sambuc #if defined(__i386__) 600433d6423SLionel Sambuc FLAG(ARCH_VM_PTE_RW, "W"); 601433d6423SLionel Sambuc #elif defined(__arm__) 602433d6423SLionel Sambuc if(pte & ARCH_VM_PTE_RO) { 603433d6423SLionel Sambuc strcat(str, "R "); 604433d6423SLionel Sambuc } else { 605433d6423SLionel Sambuc strcat(str, "W "); 606433d6423SLionel Sambuc } 607433d6423SLionel Sambuc #endif 608433d6423SLionel Sambuc FLAG(ARCH_VM_PTE_USER, "U"); 609433d6423SLionel Sambuc #if defined(__i386__) 610433d6423SLionel Sambuc FLAG(I386_VM_PWT, "PWT"); 611433d6423SLionel Sambuc FLAG(I386_VM_PCD, "PCD"); 612433d6423SLionel Sambuc FLAG(I386_VM_ACC, "ACC"); 613433d6423SLionel Sambuc FLAG(I386_VM_DIRTY, "DIRTY"); 614433d6423SLionel Sambuc FLAG(I386_VM_PS, "PS"); 615433d6423SLionel Sambuc FLAG(I386_VM_GLOBAL, "G"); 616433d6423SLionel Sambuc FLAG(I386_VM_PTAVAIL1, "AV1"); 617433d6423SLionel Sambuc FLAG(I386_VM_PTAVAIL2, "AV2"); 618433d6423SLionel Sambuc FLAG(I386_VM_PTAVAIL3, "AV3"); 619433d6423SLionel Sambuc #elif defined(__arm__) 620433d6423SLionel Sambuc FLAG(ARM_VM_PTE_SUPER, "S"); 621433d6423SLionel Sambuc FLAG(ARM_VM_PTE_S, "SH"); 622433d6423SLionel Sambuc FLAG(ARM_VM_PTE_WB, "WB"); 623433d6423SLionel Sambuc FLAG(ARM_VM_PTE_WT, "WT"); 624433d6423SLionel Sambuc #endif 625433d6423SLionel Sambuc 626433d6423SLionel Sambuc return str; 627433d6423SLionel Sambuc } 628433d6423SLionel Sambuc 629433d6423SLionel Sambuc /*===========================================================================* 630433d6423SLionel Sambuc * pt_map_in_range * 631433d6423SLionel Sambuc 
*===========================================================================*/ 632433d6423SLionel Sambuc int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp, 633433d6423SLionel Sambuc vir_bytes start, vir_bytes end) 634433d6423SLionel Sambuc { 635433d6423SLionel Sambuc /* Transfer all the mappings from the pt of the source process to the pt of 636433d6423SLionel Sambuc * the destination process in the range specified. 637433d6423SLionel Sambuc */ 638433d6423SLionel Sambuc int pde, pte; 639433d6423SLionel Sambuc vir_bytes viraddr; 640433d6423SLionel Sambuc pt_t *pt, *dst_pt; 641433d6423SLionel Sambuc 642433d6423SLionel Sambuc pt = &src_vmp->vm_pt; 643433d6423SLionel Sambuc dst_pt = &dst_vmp->vm_pt; 644433d6423SLionel Sambuc 645433d6423SLionel Sambuc end = end ? end : VM_DATATOP; 646433d6423SLionel Sambuc assert(start % VM_PAGE_SIZE == 0); 647433d6423SLionel Sambuc assert(end % VM_PAGE_SIZE == 0); 648433d6423SLionel Sambuc 649433d6423SLionel Sambuc assert( /* ARCH_VM_PDE(start) >= 0 && */ start <= end); 650433d6423SLionel Sambuc assert(ARCH_VM_PDE(end) < ARCH_VM_DIR_ENTRIES); 651433d6423SLionel Sambuc 652433d6423SLionel Sambuc #if LU_DEBUG 653433d6423SLionel Sambuc printf("VM: pt_map_in_range: src = %d, dst = %d\n", 654433d6423SLionel Sambuc src_vmp->vm_endpoint, dst_vmp->vm_endpoint); 655433d6423SLionel Sambuc printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n", 656433d6423SLionel Sambuc start, ARCH_VM_PDE(start), ARCH_VM_PTE(start), 657433d6423SLionel Sambuc end, ARCH_VM_PDE(end), ARCH_VM_PTE(end)); 658433d6423SLionel Sambuc #endif 659433d6423SLionel Sambuc 660433d6423SLionel Sambuc /* Scan all page-table entries in the range. */ 661433d6423SLionel Sambuc for(viraddr = start; viraddr <= end; viraddr += VM_PAGE_SIZE) { 662433d6423SLionel Sambuc pde = ARCH_VM_PDE(viraddr); 663433d6423SLionel Sambuc if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) { 664433d6423SLionel Sambuc if(viraddr == VM_DATATOP) break; 665433d6423SLionel Sambuc continue; 666433d6423SLionel Sambuc } 667433d6423SLionel Sambuc pte = ARCH_VM_PTE(viraddr); 668433d6423SLionel Sambuc if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) { 669433d6423SLionel Sambuc if(viraddr == VM_DATATOP) break; 670433d6423SLionel Sambuc continue; 671433d6423SLionel Sambuc } 672433d6423SLionel Sambuc 673433d6423SLionel Sambuc /* Transfer the mapping. */ 674433d6423SLionel Sambuc dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte]; 675*10e6ba68SBen Gras assert(dst_pt->pt_pt[pde]); 676433d6423SLionel Sambuc 677433d6423SLionel Sambuc if(viraddr == VM_DATATOP) break; 678433d6423SLionel Sambuc } 679433d6423SLionel Sambuc 680433d6423SLionel Sambuc return OK; 681433d6423SLionel Sambuc } 682433d6423SLionel Sambuc 683433d6423SLionel Sambuc /*===========================================================================* 684433d6423SLionel Sambuc * pt_ptmap * 685433d6423SLionel Sambuc *===========================================================================*/ 686433d6423SLionel Sambuc int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp) 687433d6423SLionel Sambuc { 688433d6423SLionel Sambuc /* Transfer mappings to page dir and page tables from source process and 68963483e02SCristiano Giuffrida * destination process. 
690433d6423SLionel Sambuc */ 691433d6423SLionel Sambuc int pde, r; 692433d6423SLionel Sambuc phys_bytes physaddr; 693433d6423SLionel Sambuc vir_bytes viraddr; 694433d6423SLionel Sambuc pt_t *pt; 695433d6423SLionel Sambuc 696433d6423SLionel Sambuc pt = &src_vmp->vm_pt; 697433d6423SLionel Sambuc 698433d6423SLionel Sambuc #if LU_DEBUG 699433d6423SLionel Sambuc printf("VM: pt_ptmap: src = %d, dst = %d\n", 700433d6423SLionel Sambuc src_vmp->vm_endpoint, dst_vmp->vm_endpoint); 701433d6423SLionel Sambuc #endif 702433d6423SLionel Sambuc 703433d6423SLionel Sambuc /* Transfer mapping to the page directory. */ 704433d6423SLionel Sambuc viraddr = (vir_bytes) pt->pt_dir; 705433d6423SLionel Sambuc physaddr = pt->pt_dir_phys & ARCH_VM_ADDR_MASK; 706433d6423SLionel Sambuc #if defined(__i386__) 707433d6423SLionel Sambuc if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE, 708433d6423SLionel Sambuc ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW, 709433d6423SLionel Sambuc #elif defined(__arm__) 710433d6423SLionel Sambuc if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE, 711433d6423SLionel Sambuc ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | 712433d6423SLionel Sambuc ARM_VM_PTE_CACHED , 713433d6423SLionel Sambuc #endif 714433d6423SLionel Sambuc WMF_OVERWRITE)) != OK) { 715433d6423SLionel Sambuc return r; 716433d6423SLionel Sambuc } 717433d6423SLionel Sambuc #if LU_DEBUG 718433d6423SLionel Sambuc printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n", 719433d6423SLionel Sambuc viraddr, physaddr); 720433d6423SLionel Sambuc #endif 721433d6423SLionel Sambuc 722433d6423SLionel Sambuc /* Scan all non-reserved page-directory entries. */ 723*10e6ba68SBen Gras for(pde=0; pde < kern_start_pde; pde++) { 724433d6423SLionel Sambuc if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) { 725433d6423SLionel Sambuc continue; 726433d6423SLionel Sambuc } 727433d6423SLionel Sambuc 728*10e6ba68SBen Gras if(!pt->pt_pt[pde]) { panic("pde %d empty\n", pde); } 729*10e6ba68SBen Gras 730433d6423SLionel Sambuc /* Transfer mapping to the page table. */ 731433d6423SLionel Sambuc viraddr = (vir_bytes) pt->pt_pt[pde]; 732433d6423SLionel Sambuc #if defined(__i386__) 733433d6423SLionel Sambuc physaddr = pt->pt_dir[pde] & ARCH_VM_ADDR_MASK; 734433d6423SLionel Sambuc #elif defined(__arm__) 735433d6423SLionel Sambuc physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK; 736433d6423SLionel Sambuc #endif 737*10e6ba68SBen Gras assert(viraddr); 738433d6423SLionel Sambuc if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE, 739433d6423SLionel Sambuc ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW 740433d6423SLionel Sambuc #ifdef __arm__ 741433d6423SLionel Sambuc | ARM_VM_PTE_CACHED 742433d6423SLionel Sambuc #endif 743433d6423SLionel Sambuc , 744433d6423SLionel Sambuc WMF_OVERWRITE)) != OK) { 745433d6423SLionel Sambuc return r; 746433d6423SLionel Sambuc } 747433d6423SLionel Sambuc } 748433d6423SLionel Sambuc 749433d6423SLionel Sambuc return OK; 750433d6423SLionel Sambuc } 751433d6423SLionel Sambuc 752433d6423SLionel Sambuc void pt_clearmapcache(void) 753433d6423SLionel Sambuc { 754433d6423SLionel Sambuc /* Make sure kernel will invalidate tlb when using current 755433d6423SLionel Sambuc * pagetable (i.e. vm's) to make new mappings before new cr3 756433d6423SLionel Sambuc * is loaded. 
757433d6423SLionel Sambuc */ 758433d6423SLionel Sambuc if(sys_vmctl(SELF, VMCTL_CLEARMAPCACHE, 0) != OK) 759433d6423SLionel Sambuc panic("VMCTL_CLEARMAPCACHE failed"); 760433d6423SLionel Sambuc } 761433d6423SLionel Sambuc 762433d6423SLionel Sambuc int pt_writable(struct vmproc *vmp, vir_bytes v) 763433d6423SLionel Sambuc { 764433d6423SLionel Sambuc u32_t entry; 765433d6423SLionel Sambuc pt_t *pt = &vmp->vm_pt; 766433d6423SLionel Sambuc assert(!(v % VM_PAGE_SIZE)); 767433d6423SLionel Sambuc int pde = ARCH_VM_PDE(v); 768433d6423SLionel Sambuc int pte = ARCH_VM_PTE(v); 769433d6423SLionel Sambuc 770433d6423SLionel Sambuc assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT); 771433d6423SLionel Sambuc assert(pt->pt_pt[pde]); 772433d6423SLionel Sambuc 773433d6423SLionel Sambuc entry = pt->pt_pt[pde][pte]; 774433d6423SLionel Sambuc 775433d6423SLionel Sambuc #if defined(__i386__) 776433d6423SLionel Sambuc return((entry & PTF_WRITE) ? 1 : 0); 777433d6423SLionel Sambuc #elif defined(__arm__) 778433d6423SLionel Sambuc return((entry & ARCH_VM_PTE_RO) ? 0 : 1); 779433d6423SLionel Sambuc #endif 780433d6423SLionel Sambuc } 781433d6423SLionel Sambuc 782433d6423SLionel Sambuc /*===========================================================================* 783433d6423SLionel Sambuc * pt_writemap * 784433d6423SLionel Sambuc *===========================================================================*/ 785433d6423SLionel Sambuc int pt_writemap(struct vmproc * vmp, 786433d6423SLionel Sambuc pt_t *pt, 787433d6423SLionel Sambuc vir_bytes v, 788433d6423SLionel Sambuc phys_bytes physaddr, 789433d6423SLionel Sambuc size_t bytes, 790433d6423SLionel Sambuc u32_t flags, 791433d6423SLionel Sambuc u32_t writemapflags) 792433d6423SLionel Sambuc { 793433d6423SLionel Sambuc /* Write mapping into page table. Allocate a new page table if necessary. */ 794433d6423SLionel Sambuc /* Page directory and table entries for this virtual address. */ 795433d6423SLionel Sambuc int p, pages; 796433d6423SLionel Sambuc int verify = 0; 797433d6423SLionel Sambuc int ret = OK; 798433d6423SLionel Sambuc 799433d6423SLionel Sambuc #ifdef CONFIG_SMP 800433d6423SLionel Sambuc int vminhibit_clear = 0; 801433d6423SLionel Sambuc /* FIXME 802433d6423SLionel Sambuc * don't do it everytime, stop the process only on the first change and 803433d6423SLionel Sambuc * resume the execution on the last change. Do in a wrapper of this 804433d6423SLionel Sambuc * function 805433d6423SLionel Sambuc */ 806433d6423SLionel Sambuc if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR && 807433d6423SLionel Sambuc !(vmp->vm_flags & VMF_EXITING)) { 808433d6423SLionel Sambuc sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0); 809433d6423SLionel Sambuc vminhibit_clear = 1; 810433d6423SLionel Sambuc } 811433d6423SLionel Sambuc #endif 812433d6423SLionel Sambuc 813433d6423SLionel Sambuc if(writemapflags & WMF_VERIFY) 814433d6423SLionel Sambuc verify = 1; 815433d6423SLionel Sambuc 816433d6423SLionel Sambuc assert(!(bytes % VM_PAGE_SIZE)); 817433d6423SLionel Sambuc assert(!(flags & ~(PTF_ALLFLAGS))); 818433d6423SLionel Sambuc 819433d6423SLionel Sambuc pages = bytes / VM_PAGE_SIZE; 820433d6423SLionel Sambuc 821433d6423SLionel Sambuc /* MAP_NONE means to clear the mapping. It doesn't matter 822433d6423SLionel Sambuc * what's actually written into the PTE if PRESENT 823433d6423SLionel Sambuc * isn't on, so we can just write MAP_NONE into it. 
824433d6423SLionel Sambuc */ 825433d6423SLionel Sambuc assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT)); 826433d6423SLionel Sambuc assert(physaddr != MAP_NONE || !flags); 827433d6423SLionel Sambuc 828433d6423SLionel Sambuc /* First make sure all the necessary page tables are allocated, 829433d6423SLionel Sambuc * before we start writing in any of them, because it's a pain 830433d6423SLionel Sambuc * to undo our work properly. 831433d6423SLionel Sambuc */ 832433d6423SLionel Sambuc ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify); 833433d6423SLionel Sambuc if(ret != OK) { 834433d6423SLionel Sambuc printf("VM: writemap: pt_ptalloc_in_range failed\n"); 835433d6423SLionel Sambuc goto resume_exit; 836433d6423SLionel Sambuc } 837433d6423SLionel Sambuc 838433d6423SLionel Sambuc /* Now write in them. */ 839433d6423SLionel Sambuc for(p = 0; p < pages; p++) { 840433d6423SLionel Sambuc u32_t entry; 841433d6423SLionel Sambuc int pde = ARCH_VM_PDE(v); 842433d6423SLionel Sambuc int pte = ARCH_VM_PTE(v); 843433d6423SLionel Sambuc 844433d6423SLionel Sambuc assert(!(v % VM_PAGE_SIZE)); 845433d6423SLionel Sambuc assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES); 846433d6423SLionel Sambuc assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES); 847433d6423SLionel Sambuc 848433d6423SLionel Sambuc /* Page table has to be there. */ 849433d6423SLionel Sambuc assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT); 850433d6423SLionel Sambuc 851433d6423SLionel Sambuc /* We do not expect it to be a bigpage. */ 852433d6423SLionel Sambuc assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE)); 853433d6423SLionel Sambuc 854433d6423SLionel Sambuc /* Make sure page directory entry for this page table 855433d6423SLionel Sambuc * is marked present and page table entry is available. 856433d6423SLionel Sambuc */ 857433d6423SLionel Sambuc assert(pt->pt_pt[pde]); 858433d6423SLionel Sambuc 859433d6423SLionel Sambuc #if SANITYCHECKS 860433d6423SLionel Sambuc /* We don't expect to overwrite a page. */ 861433d6423SLionel Sambuc if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY))) 862433d6423SLionel Sambuc assert(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)); 863433d6423SLionel Sambuc #endif 864433d6423SLionel Sambuc if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) { 865433d6423SLionel Sambuc #if defined(__i386__) 866433d6423SLionel Sambuc physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK; 867433d6423SLionel Sambuc #elif defined(__arm__) 868433d6423SLionel Sambuc physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK; 869433d6423SLionel Sambuc #endif 870433d6423SLionel Sambuc } 871433d6423SLionel Sambuc 872433d6423SLionel Sambuc if(writemapflags & WMF_FREE) { 873433d6423SLionel Sambuc free_mem(ABS2CLICK(physaddr), 1); 874433d6423SLionel Sambuc } 875433d6423SLionel Sambuc 876433d6423SLionel Sambuc /* Entry we will write. */ 877433d6423SLionel Sambuc #if defined(__i386__) 878433d6423SLionel Sambuc entry = (physaddr & ARCH_VM_ADDR_MASK) | flags; 879433d6423SLionel Sambuc #elif defined(__arm__) 880433d6423SLionel Sambuc entry = (physaddr & ARM_VM_PTE_MASK) | flags; 881433d6423SLionel Sambuc #endif 882433d6423SLionel Sambuc 883433d6423SLionel Sambuc if(verify) { 884433d6423SLionel Sambuc u32_t maskedentry; 885433d6423SLionel Sambuc maskedentry = pt->pt_pt[pde][pte]; 886433d6423SLionel Sambuc #if defined(__i386__) 887433d6423SLionel Sambuc maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY); 888433d6423SLionel Sambuc #endif 889433d6423SLionel Sambuc /* Verify pagetable entry. 
*/ 890433d6423SLionel Sambuc #if defined(__i386__) 891433d6423SLionel Sambuc if(entry & ARCH_VM_PTE_RW) { 892433d6423SLionel Sambuc /* If we expect a writable page, allow a readonly page. */ 893433d6423SLionel Sambuc maskedentry |= ARCH_VM_PTE_RW; 894433d6423SLionel Sambuc } 895433d6423SLionel Sambuc #elif defined(__arm__) 896433d6423SLionel Sambuc if(!(entry & ARCH_VM_PTE_RO)) { 897433d6423SLionel Sambuc /* If we expect a writable page, allow a readonly page. */ 898433d6423SLionel Sambuc maskedentry &= ~ARCH_VM_PTE_RO; 899433d6423SLionel Sambuc } 900433d6423SLionel Sambuc maskedentry &= ~(ARM_VM_PTE_WB|ARM_VM_PTE_WT); 901433d6423SLionel Sambuc #endif 902433d6423SLionel Sambuc if(maskedentry != entry) { 903433d6423SLionel Sambuc printf("pt_writemap: mismatch: "); 904433d6423SLionel Sambuc #if defined(__i386__) 905433d6423SLionel Sambuc if((entry & ARCH_VM_ADDR_MASK) != 906433d6423SLionel Sambuc (maskedentry & ARCH_VM_ADDR_MASK)) { 907433d6423SLionel Sambuc #elif defined(__arm__) 908433d6423SLionel Sambuc if((entry & ARM_VM_PTE_MASK) != 909433d6423SLionel Sambuc (maskedentry & ARM_VM_PTE_MASK)) { 910433d6423SLionel Sambuc #endif 911433d6423SLionel Sambuc printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ", 912433d6423SLionel Sambuc (long)entry, (long)maskedentry); 913433d6423SLionel Sambuc } else printf("phys ok; "); 914433d6423SLionel Sambuc printf(" flags: found %s; ", 915433d6423SLionel Sambuc ptestr(pt->pt_pt[pde][pte])); 916433d6423SLionel Sambuc printf(" masked %s; ", 917433d6423SLionel Sambuc ptestr(maskedentry)); 918433d6423SLionel Sambuc printf(" expected %s\n", ptestr(entry)); 919433d6423SLionel Sambuc printf("found 0x%x, wanted 0x%x\n", 920433d6423SLionel Sambuc pt->pt_pt[pde][pte], entry); 921433d6423SLionel Sambuc ret = EFAULT; 922433d6423SLionel Sambuc goto resume_exit; 923433d6423SLionel Sambuc } 924433d6423SLionel Sambuc } else { 925433d6423SLionel Sambuc /* Write pagetable entry. 
*/ 926433d6423SLionel Sambuc pt->pt_pt[pde][pte] = entry; 927433d6423SLionel Sambuc } 928433d6423SLionel Sambuc 929433d6423SLionel Sambuc physaddr += VM_PAGE_SIZE; 930433d6423SLionel Sambuc v += VM_PAGE_SIZE; 931433d6423SLionel Sambuc } 932433d6423SLionel Sambuc 933433d6423SLionel Sambuc resume_exit: 934433d6423SLionel Sambuc 935433d6423SLionel Sambuc #ifdef CONFIG_SMP 936433d6423SLionel Sambuc if (vminhibit_clear) { 937433d6423SLionel Sambuc assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR && 938433d6423SLionel Sambuc !(vmp->vm_flags & VMF_EXITING)); 939433d6423SLionel Sambuc sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0); 940433d6423SLionel Sambuc } 941433d6423SLionel Sambuc #endif 942433d6423SLionel Sambuc 943433d6423SLionel Sambuc return ret; 944433d6423SLionel Sambuc } 945433d6423SLionel Sambuc 946433d6423SLionel Sambuc /*===========================================================================* 947433d6423SLionel Sambuc * pt_checkrange * 948433d6423SLionel Sambuc *===========================================================================*/ 949433d6423SLionel Sambuc int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes, 950433d6423SLionel Sambuc int write) 951433d6423SLionel Sambuc { 952433d6423SLionel Sambuc int p, pages; 953433d6423SLionel Sambuc 954433d6423SLionel Sambuc assert(!(bytes % VM_PAGE_SIZE)); 955433d6423SLionel Sambuc 956433d6423SLionel Sambuc pages = bytes / VM_PAGE_SIZE; 957433d6423SLionel Sambuc 958433d6423SLionel Sambuc for(p = 0; p < pages; p++) { 959433d6423SLionel Sambuc int pde = ARCH_VM_PDE(v); 960433d6423SLionel Sambuc int pte = ARCH_VM_PTE(v); 961433d6423SLionel Sambuc 962433d6423SLionel Sambuc assert(!(v % VM_PAGE_SIZE)); 963433d6423SLionel Sambuc assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES); 964433d6423SLionel Sambuc assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES); 965433d6423SLionel Sambuc 966433d6423SLionel Sambuc /* Page table has to be there. */ 967433d6423SLionel Sambuc if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) 968433d6423SLionel Sambuc return EFAULT; 969433d6423SLionel Sambuc 970433d6423SLionel Sambuc /* Make sure page directory entry for this page table 971433d6423SLionel Sambuc * is marked present and page table entry is available. 972433d6423SLionel Sambuc */ 973433d6423SLionel Sambuc assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]); 974433d6423SLionel Sambuc 975433d6423SLionel Sambuc if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) { 976433d6423SLionel Sambuc return EFAULT; 977433d6423SLionel Sambuc } 978433d6423SLionel Sambuc 979433d6423SLionel Sambuc #if defined(__i386__) 980433d6423SLionel Sambuc if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) { 981433d6423SLionel Sambuc #elif defined(__arm__) 982433d6423SLionel Sambuc if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) { 983433d6423SLionel Sambuc #endif 984433d6423SLionel Sambuc return EFAULT; 985433d6423SLionel Sambuc } 986433d6423SLionel Sambuc 987433d6423SLionel Sambuc v += VM_PAGE_SIZE; 988433d6423SLionel Sambuc } 989433d6423SLionel Sambuc 990433d6423SLionel Sambuc return OK; 991433d6423SLionel Sambuc } 992433d6423SLionel Sambuc 993433d6423SLionel Sambuc /*===========================================================================* 994433d6423SLionel Sambuc * pt_new * 995433d6423SLionel Sambuc *===========================================================================*/ 996433d6423SLionel Sambuc int pt_new(pt_t *pt) 997433d6423SLionel Sambuc { 998433d6423SLionel Sambuc /* Allocate a pagetable root. 
Allocate a page-aligned page directory 999433d6423SLionel Sambuc * and set them to 0 (indicating no page tables are allocated). Lookup 1000433d6423SLionel Sambuc * its physical address as we'll need that in the future. Verify it's 1001433d6423SLionel Sambuc * page-aligned. 1002433d6423SLionel Sambuc */ 1003433d6423SLionel Sambuc int i, r; 1004433d6423SLionel Sambuc 1005433d6423SLionel Sambuc /* Don't ever re-allocate/re-move a certain process slot's 1006433d6423SLionel Sambuc * page directory once it's been created. This is a fraction 1007433d6423SLionel Sambuc * faster, but also avoids having to invalidate the page 1008433d6423SLionel Sambuc * mappings from in-kernel page tables pointing to 1009433d6423SLionel Sambuc * the page directories (the page_directories data). 1010433d6423SLionel Sambuc */ 1011433d6423SLionel Sambuc if(!pt->pt_dir && 1012433d6423SLionel Sambuc !(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys, 1013433d6423SLionel Sambuc VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) { 1014433d6423SLionel Sambuc return ENOMEM; 1015433d6423SLionel Sambuc } 1016433d6423SLionel Sambuc 1017433d6423SLionel Sambuc assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE)); 1018433d6423SLionel Sambuc 1019433d6423SLionel Sambuc for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) { 1020433d6423SLionel Sambuc pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */ 1021433d6423SLionel Sambuc pt->pt_pt[i] = NULL; 1022433d6423SLionel Sambuc } 1023433d6423SLionel Sambuc 1024433d6423SLionel Sambuc /* Where to start looking for free virtual address space? */ 1025433d6423SLionel Sambuc pt->pt_virtop = 0; 1026433d6423SLionel Sambuc 1027433d6423SLionel Sambuc /* Map in kernel. */ 1028433d6423SLionel Sambuc if((r=pt_mapkernel(pt)) != OK) 1029433d6423SLionel Sambuc return r; 1030433d6423SLionel Sambuc 1031433d6423SLionel Sambuc return OK; 1032433d6423SLionel Sambuc } 1033433d6423SLionel Sambuc 1034433d6423SLionel Sambuc static int freepde(void) 1035433d6423SLionel Sambuc { 1036433d6423SLionel Sambuc int p = kernel_boot_info.freepde_start++; 1037433d6423SLionel Sambuc assert(kernel_boot_info.freepde_start < ARCH_VM_DIR_ENTRIES); 1038433d6423SLionel Sambuc return p; 1039433d6423SLionel Sambuc } 1040433d6423SLionel Sambuc 1041*10e6ba68SBen Gras void pt_allocate_kernel_mapped_pagetables(void) 1042*10e6ba68SBen Gras { 1043*10e6ba68SBen Gras /* Reserve PDEs available for mapping in the page directories. */ 1044*10e6ba68SBen Gras int pd; 1045*10e6ba68SBen Gras for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) { 1046*10e6ba68SBen Gras struct pdm *pdm = &pagedir_mappings[pd]; 1047*10e6ba68SBen Gras if(!pdm->pdeno) { 1048*10e6ba68SBen Gras pdm->pdeno = freepde(); 1049*10e6ba68SBen Gras assert(pdm->pdeno); 1050*10e6ba68SBen Gras } 1051*10e6ba68SBen Gras phys_bytes ph; 1052*10e6ba68SBen Gras 1053*10e6ba68SBen Gras /* Allocate us a page table in which to 1054*10e6ba68SBen Gras * remember page directory pointers. 
1055*10e6ba68SBen Gras */ 1056*10e6ba68SBen Gras if(!(pdm->page_directories = 1057*10e6ba68SBen Gras vm_allocpage(&ph, VMP_PAGETABLE))) { 1058*10e6ba68SBen Gras panic("no virt addr for vm mappings"); 1059*10e6ba68SBen Gras } 1060*10e6ba68SBen Gras memset(pdm->page_directories, 0, VM_PAGE_SIZE); 1061*10e6ba68SBen Gras pdm->phys = ph; 1062*10e6ba68SBen Gras 1063*10e6ba68SBen Gras #if defined(__i386__) 1064*10e6ba68SBen Gras pdm->val = (ph & ARCH_VM_ADDR_MASK) | 1065*10e6ba68SBen Gras ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW; 1066*10e6ba68SBen Gras #elif defined(__arm__) 1067*10e6ba68SBen Gras pdm->val = (ph & ARCH_VM_PDE_MASK) 1068*10e6ba68SBen Gras | ARCH_VM_PDE_PRESENT 1069*10e6ba68SBen Gras | ARM_VM_PTE_CACHED 1070*10e6ba68SBen Gras | ARM_VM_PDE_DOMAIN; //LSC FIXME 1071*10e6ba68SBen Gras #endif 1072*10e6ba68SBen Gras } 1073*10e6ba68SBen Gras } 1074*10e6ba68SBen Gras 1075433d6423SLionel Sambuc /*===========================================================================* 1076433d6423SLionel Sambuc * pt_init * 1077433d6423SLionel Sambuc *===========================================================================*/ 1078433d6423SLionel Sambuc void pt_init(void) 1079433d6423SLionel Sambuc { 1080433d6423SLionel Sambuc pt_t *newpt; 1081433d6423SLionel Sambuc int s, r, p; 1082*10e6ba68SBen Gras phys_bytes phys; 1083433d6423SLionel Sambuc vir_bytes sparepages_mem; 1084433d6423SLionel Sambuc #if defined(__arm__) 1085433d6423SLionel Sambuc vir_bytes sparepagedirs_mem; 1086433d6423SLionel Sambuc #endif 1087433d6423SLionel Sambuc static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES]; 1088433d6423SLionel Sambuc int m = kernel_boot_info.kern_mod; 1089433d6423SLionel Sambuc #if defined(__i386__) 1090433d6423SLionel Sambuc int global_bit_ok = 0; 1091433d6423SLionel Sambuc u32_t mypdbr; /* Page Directory Base Register (cr3) value */ 1092433d6423SLionel Sambuc #elif defined(__arm__) 1093433d6423SLionel Sambuc u32_t myttbr; 1094433d6423SLionel Sambuc #endif 1095433d6423SLionel Sambuc 1096433d6423SLionel Sambuc /* Find what the physical location of the kernel is. */ 1097433d6423SLionel Sambuc assert(m >= 0); 1098433d6423SLionel Sambuc assert(m < kernel_boot_info.mods_with_kernel); 1099433d6423SLionel Sambuc assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS); 1100433d6423SLionel Sambuc kern_mb_mod = &kernel_boot_info.module_list[m]; 1101433d6423SLionel Sambuc kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start; 1102433d6423SLionel Sambuc assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE)); 1103433d6423SLionel Sambuc assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE)); 1104433d6423SLionel Sambuc kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE; 1105433d6423SLionel Sambuc 1106433d6423SLionel Sambuc /* Get ourselves spare pages. */ 1107433d6423SLionel Sambuc sparepages_mem = (vir_bytes) static_sparepages; 1108433d6423SLionel Sambuc assert(!(sparepages_mem % VM_PAGE_SIZE)); 1109433d6423SLionel Sambuc 1110433d6423SLionel Sambuc #if defined(__arm__) 1111433d6423SLionel Sambuc /* Get ourselves spare pagedirs. */ 1112433d6423SLionel Sambuc sparepagedirs_mem = (vir_bytes) static_sparepagedirs; 1113433d6423SLionel Sambuc assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE)); 1114433d6423SLionel Sambuc #endif 1115433d6423SLionel Sambuc 1116433d6423SLionel Sambuc /* Spare pages are used to allocate memory before VM has its own page 1117433d6423SLionel Sambuc * table that things (i.e. arbitrary physical memory) can be mapped into. 
1118433d6423SLionel Sambuc * We get it by pre-allocating it in our bss (allocated and mapped in by 1119433d6423SLionel Sambuc * the kernel) in static_sparepages. We also need the physical addresses 1120433d6423SLionel Sambuc * though; we look them up now so they are ready for use. 1121433d6423SLionel Sambuc */ 1122433d6423SLionel Sambuc #if defined(__arm__) 1123433d6423SLionel Sambuc missing_sparedirs = 0; 1124433d6423SLionel Sambuc assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS); 1125433d6423SLionel Sambuc for(s = 0; s < SPAREPAGEDIRS; s++) { 1126433d6423SLionel Sambuc vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);; 1127433d6423SLionel Sambuc phys_bytes ph; 1128433d6423SLionel Sambuc if((r=sys_umap(SELF, VM_D, (vir_bytes) v, 1129433d6423SLionel Sambuc ARCH_PAGEDIR_SIZE, &ph)) != OK) 1130433d6423SLionel Sambuc panic("pt_init: sys_umap failed: %d", r); 1131433d6423SLionel Sambuc if(s >= STATIC_SPAREPAGEDIRS) { 1132433d6423SLionel Sambuc sparepagedirs[s].pagedir = NULL; 1133433d6423SLionel Sambuc missing_sparedirs++; 1134433d6423SLionel Sambuc continue; 1135433d6423SLionel Sambuc } 1136433d6423SLionel Sambuc sparepagedirs[s].pagedir = (void *) v; 1137433d6423SLionel Sambuc sparepagedirs[s].phys = ph; 1138433d6423SLionel Sambuc } 1139433d6423SLionel Sambuc #endif 1140433d6423SLionel Sambuc 1141433d6423SLionel Sambuc if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0))) 1142433d6423SLionel Sambuc panic("reservedqueue_new for single pages failed"); 1143433d6423SLionel Sambuc 1144433d6423SLionel Sambuc assert(STATIC_SPAREPAGES < SPAREPAGES); 1145433d6423SLionel Sambuc for(s = 0; s < STATIC_SPAREPAGES; s++) { 1146433d6423SLionel Sambuc void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE); 1147433d6423SLionel Sambuc phys_bytes ph; 1148433d6423SLionel Sambuc if((r=sys_umap(SELF, VM_D, (vir_bytes) v, 1149433d6423SLionel Sambuc VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK) 1150433d6423SLionel Sambuc panic("pt_init: sys_umap failed: %d", r); 1151433d6423SLionel Sambuc reservedqueue_add(spare_pagequeue, v, ph); 1152433d6423SLionel Sambuc } 1153433d6423SLionel Sambuc 1154433d6423SLionel Sambuc #if defined(__i386__) 1155433d6423SLionel Sambuc /* global bit and 4MB pages available? */ 1156433d6423SLionel Sambuc global_bit_ok = _cpufeature(_CPUF_I386_PGE); 1157433d6423SLionel Sambuc bigpage_ok = _cpufeature(_CPUF_I386_PSE); 1158433d6423SLionel Sambuc 1159433d6423SLionel Sambuc /* Set bit for PTE's and PDE's if available. */ 1160433d6423SLionel Sambuc if(global_bit_ok) 1161433d6423SLionel Sambuc global_bit = I386_VM_GLOBAL; 1162433d6423SLionel Sambuc #endif 1163433d6423SLionel Sambuc 1164433d6423SLionel Sambuc /* Now reserve another pde for kernel's own mappings. 
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, pindex = 0;
		u32_t offset = 0;

		kernmap_pde = freepde();
		offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(pindex, &addr, &len,
			&flags) == OK) {
			int usedpde;
			vir_bytes vir;
			if(pindex >= MAX_KERNMAPPINGS)
				panic("VM: too many kernel mappings: %d", pindex);
			kern_mappings[pindex].phys_addr = addr;
			kern_mappings[pindex].len = len;
			kern_mappings[pindex].flags = flags;
			kern_mappings[pindex].vir_addr = offset;
			kern_mappings[pindex].flags =
				ARCH_VM_PTE_PRESENT;
			if(flags & VMMF_UNCACHED)
#if defined(__i386__)
				kern_mappings[pindex].flags |= PTF_NOCACHE;
#elif defined(__arm__)
				kern_mappings[pindex].flags |= ARM_VM_PTE_DEVICE;
			else {
				kern_mappings[pindex].flags |= ARM_VM_PTE_CACHED;
			}
#endif
			if(flags & VMMF_USER)
				kern_mappings[pindex].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
			else
				kern_mappings[pindex].flags |= ARM_VM_PTE_SUPER;
#endif
			if(flags & VMMF_WRITE)
				kern_mappings[pindex].flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
			else
				kern_mappings[pindex].flags |= ARCH_VM_PTE_RO;
#endif

#if defined(__i386__)
			if(flags & VMMF_GLO)
				kern_mappings[pindex].flags |= I386_VM_GLOBAL;
#endif

			if(addr % VM_PAGE_SIZE)
				panic("VM: addr unaligned: %lu", addr);
			if(len % VM_PAGE_SIZE)
				panic("VM: len unaligned: %lu", len);
			vir = offset;
			if(sys_vmctl_reply_mapping(pindex, vir) != OK)
				panic("VM: reply failed");
			offset += len;
			pindex++;
			kernmappings++;

			usedpde = ARCH_VM_PDE(offset);
			while(usedpde > kernmap_pde) {
				int newpde = freepde();
				assert(newpde == kernmap_pde+1);
				kernmap_pde = newpde;
			}
		}
	}

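	/* Set up the "page directory pagetable(s)": each pagedir_mappings[]
	 * entry gets a page table page whose entries will map the page
	 * directories of all processes, so the kernel can reach any
	 * process's page directory through a fixed virtual window (entries
	 * are written by pt_bind(), the PDE itself is installed by
	 * pt_mapkernel()).
	 */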
	pt_allocate_kernel_mapped_pagetables();

	/* All right. Now. We have to make our own page directory and page
	 * tables (the ones the kernel has already set up) accessible to us.
	 * It's easier to understand if we just copy all the required pages
	 * (i.e. page directory and page tables), and set up the pointers as
	 * if VM had done it itself.
	 *
	 * This allocation will happen without using any page table, and just
	 * uses spare pages.
	 */
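	/* vmprocess->vm_pt will become VM's own address space: pt_new() sets
	 * up a fresh page table for it using only the spare pages above, the
	 * loop below copies in the page tables the kernel already built for
	 * us, and pt_bind() then hands the new page table root to the
	 * kernel.
	 */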
	newpt = &vmprocess->vm_pt;
	if(pt_new(newpt) != OK)
		panic("vm pt_new failed");

	/* Get our current pagedir so we can see it. */
#if defined(__i386__)
	if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
	if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
		panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
	if(sys_vircopy(NONE, mypdbr, SELF,
		(vir_bytes) currentpagedir, VM_PAGE_SIZE, 0) != OK)
#elif defined(__arm__)
	if(sys_vircopy(NONE, myttbr, SELF,
		(vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE, 0) != OK)
#endif
		panic("VM: sys_vircopy failed");

	/* We have mapped in kernel ourselves; now copy mappings for VM
	 * that kernel made, including allocations for BSS. Skip identity
	 * mapping bits; just map in VM.
	 */
	for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
		u32_t entry = currentpagedir[p];
		phys_bytes ptaddr_kern, ptaddr_us;

		/* BIGPAGEs are kernel mapping (do ourselves) or boot
		 * identity mapping (don't want).
		 */
		if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
		if((entry & ARCH_VM_BIGPAGE)) continue;

		if(pt_ptalloc(newpt, p, 0) != OK)
			panic("pt_ptalloc failed");
		assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
		ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		ptaddr_kern = entry & ARCH_VM_PDE_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

		/* Copy kernel-initialized pagetable contents into our
		 * normally accessible pagetable.
		 */
		if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
			panic("pt_init: abscopy failed");
	}

	/* Inform kernel vm has a newly built page table. */
	assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
	pt_bind(newpt, &vmproc[VM_PROC_NR]);

	pt_init_done = 1;

	/* VM is now fully functional in that it can dynamically allocate memory
	 * for itself.
	 *
	 * We don't want to keep using the bootstrap statically allocated spare
	 * pages though, as the physical addresses will change on liveupdate. So we
	 * re-do part of the initialization now with purely dynamically allocated
	 * memory. First throw out the static pool.
	 */

	alloc_cycle();			/* Make sure allocating works */
	while(vm_getsparepage(&phys)) ;	/* Use up all static pages */
	alloc_cycle();			/* Refill spares with dynamic */
	pt_allocate_kernel_mapped_pagetables();	/* Reallocate in-kernel pages */
	pt_bind(newpt, &vmproc[VM_PROC_NR]);	/* Recalculate */
	pt_mapkernel(newpt);			/* Rewrite pagetable info */

	/* Flush TLB just in case any of those mappings have been touched */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}

	/* All OK. */
	return;
}

/*===========================================================================*
 *				pt_bind					     *
 *===========================================================================*/
int pt_bind(pt_t *pt, struct vmproc *who)
{
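	/* Make page table 'pt' the address space of process 'who': enter the
	 * physical address of its page directory into the kernel-visible
	 * "page directory pagetable" slot for this process slot, and hand
	 * the new page table root to the kernel.
	 */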
	int procslot, pdeslot;
	u32_t phys;
	void *pdes;
	int pagedir_pde;
	int slots_per_pde;
	int pages_per_pagedir = ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE;
	struct pdm *pdm;

	slots_per_pde = ARCH_VM_PT_ENTRIES / pages_per_pagedir;

	/* Basic sanity checks. */
	assert(who);
	assert(who->vm_flags & VMF_INUSE);
	assert(pt);

	procslot = who->vm_slot;
	pdm = &pagedir_mappings[procslot/slots_per_pde];
	pdeslot = procslot%slots_per_pde;
	pagedir_pde = pdm->pdeno;
	assert(pdeslot >= 0);
	assert(procslot < ELEMENTS(vmproc));
	assert(pdeslot < ARCH_VM_PT_ENTRIES / pages_per_pagedir);
	assert(pagedir_pde >= 0);

#if defined(__i386__)
	phys = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
	phys = pt->pt_dir_phys & ARM_VM_PTE_MASK;
#endif
	assert(pt->pt_dir_phys == phys);
	assert(!(pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

	/* Update the "page directory pagetable." */
#if defined(__i386__)
	pdm->page_directories[pdeslot] =
		phys | ARCH_VM_PDE_PRESENT|ARCH_VM_PTE_RW;
#elif defined(__arm__)
	{
		int i;
		for (i = 0; i < pages_per_pagedir; i++) {
			pdm->page_directories[pdeslot*pages_per_pagedir+i] =
				(phys+i*VM_PAGE_SIZE)
				| ARCH_VM_PTE_PRESENT
				| ARCH_VM_PTE_RW
				| ARM_VM_PTE_CACHED
				| ARCH_VM_PTE_USER; //LSC FIXME
		}
	}
#endif

	/* This is where the PDEs will be visible to the kernel
	 * in its address space.
	 */
	pdes = (void *) (pagedir_pde*ARCH_BIG_PAGE_SIZE +
#if defined(__i386__)
		pdeslot * VM_PAGE_SIZE);
#elif defined(__arm__)
		pdeslot * ARCH_PAGEDIR_SIZE);
#endif

	/* Tell the kernel about the new page table root. */
	return sys_vmctl_set_addrspace(who->vm_endpoint, pt->pt_dir_phys, pdes);
}

/*===========================================================================*
 *				pt_free					     *
 *===========================================================================*/
void pt_free(pt_t *pt)
{
	/* Free memory associated with this pagetable. */
	int i;

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++)
		if(pt->pt_pt[i])
			vm_freepages((vir_bytes) pt->pt_pt[i], 1);

	return;
}

/*===========================================================================*
 *				pt_mapkernel				     *
 *===========================================================================*/
int pt_mapkernel(pt_t *pt)
{
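	/* Enter the mappings that every address space must contain: the
	 * kernel image itself (as big pages / sections), the page tables
	 * holding all page directories (pagedir_mappings[]), and the
	 * kernel's own requested mappings collected in kern_mappings[] by
	 * pt_init().
	 */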
	int i;
	int kern_pde = kern_start_pde;
	phys_bytes addr, mapped = 0;

	/* Any page table needs to map in the kernel address space. */
	assert(bigpage_ok);
	assert(kern_pde >= 0);

	/* pt_init() has made sure this is ok. */
	addr = kern_mb_mod->mod_start;

	/* Actually mapping in kernel */
	while(mapped < kern_size) {
#if defined(__i386__)
		pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
			ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
#elif defined(__arm__)
		pt->pt_dir[kern_pde] = (addr & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_CACHED
			| ARM_VM_SECTION_SUPER;
#endif
		kern_pde++;
		mapped += ARCH_BIG_PAGE_SIZE;
		addr += ARCH_BIG_PAGE_SIZE;
	}

	/* Kernel also wants to know about all page directories. */
	{
		int pd;
		for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
			struct pdm *pdm = &pagedir_mappings[pd];

			assert(pdm->pdeno > 0);
			assert(pdm->pdeno > kern_pde);
			pt->pt_dir[pdm->pdeno] = pdm->val;
		}
	}

	/* Kernel also wants various mappings of its own. */
	for(i = 0; i < kernmappings; i++) {
		int r;
		if((r=pt_writemap(NULL, pt,
			kern_mappings[i].vir_addr,
			kern_mappings[i].phys_addr,
			kern_mappings[i].len,
			kern_mappings[i].flags, 0)) != OK) {
			return r;
		}
	}

	return OK;
}

int get_vm_self_pages(void) { return vm_self_pages; }