
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/cpufeature.h>
#include <minix/bitmap.h>
#include <minix/debug.h>

#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <fcntl.h>
#include <stdlib.h>

#include "proto.h"
#include "glo.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"

static int vm_self_pages;

/* PDE used to map in kernel, kernel physical address. */
#define MAX_PAGEDIR_PDES 5
static struct pdm {
	int pdeno;
	u32_t val;
	phys_bytes phys;
	u32_t *page_directories;
} pagedir_mappings[MAX_PAGEDIR_PDES];

static multiboot_module_t *kern_mb_mod = NULL;
static size_t kern_size = 0;
static int kern_start_pde = -1;

/* big page size available in hardware? */
static int bigpage_ok = 1;

/* Our process table entry. */
struct vmproc *vmprocess = &vmproc[VM_PROC_NR];

/* Spare memory, ready to go after initialization, to avoid a
 * circular dependency on allocating memory and writing it into VM's
 * page table.
 */
#if SANITYCHECKS
#define SPAREPAGES 200
#define STATIC_SPAREPAGES 190
#else
#ifdef __arm__
# define SPAREPAGES 150
# define STATIC_SPAREPAGES 140
#else
# define SPAREPAGES 20
# define STATIC_SPAREPAGES 15
#endif /* __arm__ */
#endif

#ifdef __i386__
static u32_t global_bit = 0;
#endif

#define SPAREPAGEDIRS 1
#define STATIC_SPAREPAGEDIRS 1

int missing_sparedirs = SPAREPAGEDIRS;
static struct {
	void *pagedir;
	phys_bytes phys;
} sparepagedirs[SPAREPAGEDIRS];

#define is_staticaddr(v) ((vir_bytes) (v) < VM_OWN_HEAPSTART)

#define MAX_KERNMAPPINGS 10
static struct {
	phys_bytes phys_addr;	/* Physical addr. */
	phys_bytes len;		/* Length in bytes. */
	vir_bytes vir_addr;	/* Offset in page table. */
	int flags;
} kern_mappings[MAX_KERNMAPPINGS];
int kernmappings = 0;

/* Clicks must be pages, as
 *  - they must be page aligned to map them
 *  - they must be a multiple of the page size
 *  - it's inconvenient to have them bigger than pages, because we often want
 *    just one page
 * May as well require them to be equal then.
 */
#if CLICK_SIZE != VM_PAGE_SIZE
#error CLICK_SIZE must be page size.
#endif

static void *spare_pagequeue;
static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES]
	__aligned(VM_PAGE_SIZE);

#if defined(__arm__)
static char static_sparepagedirs[ARCH_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARCH_PAGEDIR_SIZE] __aligned(ARCH_PAGEDIR_SIZE);
#endif

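/* Debugging aid: copy the page directory as the kernel sees it (through its
 * physical address) into a local buffer and assert that it matches VM's own
 * in-memory copy, flushing the TLB first.
 */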
void pt_assert(pt_t *pt)
{
	char dir[4096];
	pt_clearmapcache();
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
	sys_physcopy(NONE, pt->pt_dir_phys, SELF, (vir_bytes) dir, sizeof(dir), 0);
	assert(!memcmp(dir, pt->pt_dir, sizeof(dir)));
}

#if SANITYCHECKS
/*===========================================================================*
 *				pt_sanitycheck				     *
 *===========================================================================*/
void pt_sanitycheck(pt_t *pt, const char *file, int line)
{
/* Basic pt sanity check. */
	int slot;

	MYASSERT(pt);
	MYASSERT(pt->pt_dir);
	MYASSERT(pt->pt_dir_phys);

	for(slot = 0; slot < ELEMENTS(vmproc); slot++) {
		if(pt == &vmproc[slot].vm_pt)
			break;
	}

	if(slot >= ELEMENTS(vmproc)) {
		panic("pt_sanitycheck: passed pt not in any proc");
	}

	MYASSERT(usedpages_add(pt->pt_dir_phys, VM_PAGE_SIZE) == OK);
}
#endif

/*===========================================================================*
 *				findhole				     *
 *===========================================================================*/
static u32_t findhole(int pages)
{
/* Find a space in the virtual address space of VM. */
	u32_t curv;
	int pde = 0, try_restart;
	static void *lastv = 0;
	pt_t *pt = &vmprocess->vm_pt;
	vir_bytes vmin, vmax;
	u32_t holev = NO_MEM;
	int holesize = -1;

	vmin = VM_OWN_MMAPBASE;
	vmax = VM_OWN_MMAPTOP;

	/* Input sanity check. */
	assert(vmin + VM_PAGE_SIZE >= vmin);
	assert(vmax >= vmin + VM_PAGE_SIZE);
	assert((vmin % VM_PAGE_SIZE) == 0);
	assert((vmax % VM_PAGE_SIZE) == 0);
	assert(pages > 0);

	curv = (u32_t) lastv;
	if(curv < vmin || curv >= vmax)
		curv = vmin;

	try_restart = 1;

	/* Start looking for a free page starting at vmin. */
	while(curv < vmax) {
		int pte;

		assert(curv >= vmin);
		assert(curv < vmax);

		pde = ARCH_VM_PDE(curv);
		pte = ARCH_VM_PTE(curv);

		if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
		   (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			/* there is a page here - so keep looking for holes */
			holev = NO_MEM;
			holesize = 0;
		} else {
			/* there is no page here - so we have a hole, a bigger
			 * one if we already had one
			 */
			if(holev == NO_MEM) {
				holev = curv;
				holesize = 1;
			} else holesize++;

			assert(holesize > 0);
			assert(holesize <= pages);

			/* if it's big enough, return it */
			if(holesize == pages) {
				lastv = (void*) (curv + VM_PAGE_SIZE);
				return holev;
			}
		}

		curv += VM_PAGE_SIZE;

		/* if we reached the limit, start scanning from the beginning if
		 * we haven't looked there yet
		 */
		if(curv >= vmax && try_restart) {
			try_restart = 0;
			curv = vmin;
		}
	}

	printf("VM: out of virtual address space in vm\n");

	return NO_MEM;
}

/*===========================================================================*
 *				vm_freepages				     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
	assert(!(vir % VM_PAGE_SIZE));

	if(is_staticaddr(vir)) {
		printf("VM: not freeing static page\n");
		return;
	}

	if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
		MAP_NONE, pages*VM_PAGE_SIZE, 0,
		WMF_OVERWRITE | WMF_FREE) != OK)
		panic("vm_freepages: pt_writemap failed");

	vm_self_pages--;

#if SANITYCHECKS
	/* If SANITYCHECKS are on, flush the TLB so that accessing freed
	 * pages always traps, even if a stale translation is still cached.
	 */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
#endif
}

/*===========================================================================*
 *				vm_getsparepage				     *
 *===========================================================================*/
static void *vm_getsparepage(phys_bytes *phys)
{
	void *ptr;
	if(reservedqueue_alloc(spare_pagequeue, phys, &ptr) != OK) {
		return NULL;
	}
	assert(ptr);
	return ptr;
}

/*===========================================================================*
 *				vm_getsparepagedir			     *
 *===========================================================================*/
static void *vm_getsparepagedir(phys_bytes *phys)
{
	int s;
	assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		if(sparepagedirs[s].pagedir) {
			void *sp;
			sp = sparepagedirs[s].pagedir;
			*phys = sparepagedirs[s].phys;
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
			return sp;
		}
	}
	return NULL;
}

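/* Map 'pages' pages of physical memory starting at 'p' into a free spot in
 * VM's own virtual address space and return the virtual address, or NULL if
 * no hole was found or the page table entries could not be written.
 */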
void *vm_mappages(phys_bytes p, int pages)
{
	vir_bytes loc;
	int r;
	pt_t *pt = &vmprocess->vm_pt;

	/* Where in our virtual address space can we put it? */
	loc = findhole(pages);
	if(loc == NO_MEM) {
		printf("vm_mappages: findhole failed\n");
		return NULL;
	}

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, p, VM_PAGE_SIZE*pages,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
		| ARM_VM_PTE_CACHED
#endif
		, 0)) != OK) {
		printf("vm_mappages writemap failed\n");
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	assert(loc);

	return (void *) loc;
}

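/* Nonzero once pt_init() has set up VM's own page table; until then,
 * allocations must be satisfied from the statically reserved spare pages.
 */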
static int pt_init_done;

/*===========================================================================*
 *				vm_allocpage				     *
 *===========================================================================*/
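/* Allocate 'pages' contiguous pages of memory for VM's own use and map them
 * into VM's address space; the physical address is returned in *phys. A
 * typical use in this file (see pt_ptalloc below) is, in sketch form:
 *
 *	phys_bytes ph;
 *	u32_t *p = vm_allocpage(&ph, VMP_PAGETABLE);
 *	...
 *	vm_freepages((vir_bytes) p, 1);
 */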
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
	static int level = 0;
	void *ret;
	u32_t mem_flags = 0;

	assert(reason >= 0 && reason < VMP_CATEGORIES);

	assert(pages > 0);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if((level > 1) || !pt_init_done) {
		void *s;

		if(pages == 1) s=vm_getsparepage(phys);
		else if(pages == 4) s=vm_getsparepagedir(phys);
		else panic("%d pages", pages);

		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		if(!is_staticaddr(s)) vm_self_pages++;
		return s;
	}

#if defined(__arm__)
	if (reason == VMP_PAGEDIR) {
		mem_flags |= PAF_ALIGN16K;
	}
#endif

	/* Allocate a page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	if(!(ret = vm_mappages(*phys, pages))) {
		level--;
		printf("VM: vm_allocpage: vm_mappages failed\n");
		return NULL;
	}

	level--;
	vm_self_pages++;

	return ret;
}

void *vm_allocpage(phys_bytes *phys, int reason)
{
	return vm_allocpages(phys, reason, 1);
}

/*===========================================================================*
 *				vm_pagelock				     *
 *===========================================================================*/
void vm_pagelock(void *vir, int lockflag)
{
/* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
	vir_bytes m = (vir_bytes) vir;
	int r;
	u32_t flags = ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER;
	pt_t *pt;

	pt = &vmprocess->vm_pt;

	assert(!(m % VM_PAGE_SIZE));

	if(!lockflag)
		flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
	else
		flags |= ARCH_VM_PTE_RO;

	flags |= ARM_VM_PTE_CACHED;
#endif

	/* Update flags. */
	if((r=pt_writemap(vmprocess, pt, m, 0, VM_PAGE_SIZE,
		flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
		panic("vm_pagelock: pt_writemap failed");
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	return;
}

/*===========================================================================*
 *				vm_addrok				     *
 *===========================================================================*/
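/* Check whether the given virtual address is present (and writable, if
 * 'writeflag' is set) in VM's own page table, reporting which PDE/PTE is
 * missing or read-only when it is not.
 */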
int vm_addrok(void *vir, int writeflag)
{
	pt_t *pt = &vmprocess->vm_pt;
	int pde, pte;
	vir_bytes v = (vir_bytes) vir;

	pde = ARCH_VM_PDE(v);
	pte = ARCH_VM_PTE(v);

	if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
		printf("addr not ok: missing pde %d\n", pde);
		return 0;
	}

#if defined(__i386__)
	if(writeflag &&
		!(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) {
		printf("addr not ok: pde %d present but pde unwritable\n", pde);
		return 0;
	}
#elif defined(__arm__)
	if(writeflag &&
		(pt->pt_dir[pde] & ARCH_VM_PTE_RO)) {
		printf("addr not ok: pde %d present but pde unwritable\n", pde);
		return 0;
	}

#endif
	if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
		printf("addr not ok: missing pde %d / pte %d\n",
			pde, pte);
		return 0;
	}

#if defined(__i386__)
	if(writeflag &&
		!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
		printf("addr not ok: pde %d / pte %d present but unwritable\n",
			pde, pte);
#elif defined(__arm__)
	if(writeflag &&
		(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
		printf("addr not ok: pde %d / pte %d present but unwritable\n",
			pde, pte);
#endif
		return 0;
	}

	return 1;
}

/*===========================================================================*
 *				pt_ptalloc				     *
 *===========================================================================*/
static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
{
/* Allocate a page table and write its address into the page directory. */
	int i;
	phys_bytes pt_phys;
	u32_t *p;

	/* Argument must make sense. */
	assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);
	assert(!(flags & ~(PTF_ALLFLAGS)));

	/* We don't expect to overwrite page directory entry, nor
	 * storage for the page table.
	 */
	assert(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT));
	assert(!pt->pt_pt[pde]);

	/* Get storage for the page table. The allocation call may in fact
	 * recursively create the directory entry as a side effect. In that
	 * case, we free the newly allocated page and do nothing else.
	 */
	if (!(p = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
		return ENOMEM;
	if (pt->pt_pt[pde]) {
		vm_freepages((vir_bytes) p, 1);
		assert(pt->pt_pt[pde]);
		return OK;
	}
	pt->pt_pt[pde] = p;

	for(i = 0; i < ARCH_VM_PT_ENTRIES; i++)
		pt->pt_pt[pde][i] = 0;	/* Empty entry. */

	/* Make page directory entry.
	 * The PDE is always 'present,' 'writable,' and 'user accessible,'
	 * relying on the PTE for protection.
	 */
#if defined(__i386__)
	pt->pt_dir[pde] = (pt_phys & ARCH_VM_ADDR_MASK) | flags
		| ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW;
#elif defined(__arm__)
	pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK)
		| ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif

	return OK;
}

/*===========================================================================*
 *				pt_ptalloc_in_range			     *
 *===========================================================================*/
int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
	u32_t flags, int verify)
{
/* Allocate all the page tables in the range specified. */
	int pde, first_pde, last_pde;

	first_pde = ARCH_VM_PDE(start);
	last_pde = ARCH_VM_PDE(end-1);

	assert(first_pde >= 0);
	assert(last_pde < ARCH_VM_DIR_ENTRIES);

	/* Scan all page-directory entries in the range. */
	for(pde = first_pde; pde <= last_pde; pde++) {
		assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			int r;
			if(verify) {
				printf("pt_ptalloc_in_range: no pde %d\n", pde);
				return EFAULT;
			}
			assert(!pt->pt_dir[pde]);
			if((r=pt_ptalloc(pt, pde, flags)) != OK) {
				/* Couldn't do (complete) mapping.
				 * Don't bother freeing any previously
				 * allocated page tables, they're
				 * still writable, don't point to nonsense,
				 * and pt_ptalloc leaves the directory
				 * and other data in a consistent state.
				 */
				return r;
			}
			assert(pt->pt_pt[pde]);
		}
		assert(pt->pt_pt[pde]);
		assert(pt->pt_dir[pde]);
		assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
	}

	return OK;
}

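/* Return a human-readable description of the flags set in a page table
 * entry; used for the mismatch diagnostics printed by pt_writemap().
 */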
static const char *ptestr(u32_t pte)
{
#define FLAG(constant, name) { \
	if(pte & (constant)) { strcat(str, name); strcat(str, " "); } \
}

	static char str[30];
	if(!(pte & ARCH_VM_PTE_PRESENT)) {
		return "not present";
	}
	str[0] = '\0';
#if defined(__i386__)
	FLAG(ARCH_VM_PTE_RW, "W");
#elif defined(__arm__)
	if(pte & ARCH_VM_PTE_RO) {
		strcat(str, "R ");
	} else {
		strcat(str, "W ");
	}
#endif
	FLAG(ARCH_VM_PTE_USER, "U");
#if defined(__i386__)
	FLAG(I386_VM_PWT, "PWT");
	FLAG(I386_VM_PCD, "PCD");
	FLAG(I386_VM_ACC, "ACC");
	FLAG(I386_VM_DIRTY, "DIRTY");
	FLAG(I386_VM_PS, "PS");
	FLAG(I386_VM_GLOBAL, "G");
	FLAG(I386_VM_PTAVAIL1, "AV1");
	FLAG(I386_VM_PTAVAIL2, "AV2");
	FLAG(I386_VM_PTAVAIL3, "AV3");
#elif defined(__arm__)
	FLAG(ARM_VM_PTE_SUPER, "S");
	FLAG(ARM_VM_PTE_S, "SH");
	FLAG(ARM_VM_PTE_WB, "WB");
	FLAG(ARM_VM_PTE_WT, "WT");
#endif

	return str;
}

/*===========================================================================*
 *				pt_map_in_range				     *
 *===========================================================================*/
int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
	vir_bytes start, vir_bytes end)
{
/* Transfer all the mappings from the pt of the source process to the pt of
 * the destination process in the range specified.
 */
	int pde, pte;
	vir_bytes viraddr;
	pt_t *pt, *dst_pt;

	pt = &src_vmp->vm_pt;
	dst_pt = &dst_vmp->vm_pt;

	end = end ? end : VM_DATATOP;
	assert(start % VM_PAGE_SIZE == 0);
	assert(end % VM_PAGE_SIZE == 0);

	assert( /* ARCH_VM_PDE(start) >= 0 && */ start <= end);
	assert(ARCH_VM_PDE(end) < ARCH_VM_DIR_ENTRIES);

#if LU_DEBUG
	printf("VM: pt_map_in_range: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
	printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n",
		start, ARCH_VM_PDE(start), ARCH_VM_PTE(start),
		end, ARCH_VM_PDE(end), ARCH_VM_PTE(end));
#endif

	/* Scan all page-table entries in the range. */
	for(viraddr = start; viraddr <= end; viraddr += VM_PAGE_SIZE) {
		pde = ARCH_VM_PDE(viraddr);
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			if(viraddr == VM_DATATOP) break;
			continue;
		}
		pte = ARCH_VM_PTE(viraddr);
		if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			if(viraddr == VM_DATATOP) break;
			continue;
		}

		/* Transfer the mapping. */
		dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte];
		assert(dst_pt->pt_pt[pde]);

		if(viraddr == VM_DATATOP) break;
	}

	return OK;
}

/*===========================================================================*
 *				pt_ptmap				     *
 *===========================================================================*/
int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
/* Map the source process's page directory and page tables into the
 * destination process's address space.
 */
	int pde, r;
	phys_bytes physaddr;
	vir_bytes viraddr;
	pt_t *pt;

	pt = &src_vmp->vm_pt;

#if LU_DEBUG
	printf("VM: pt_ptmap: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif

	/* Transfer mapping to the page directory. */
	viraddr = (vir_bytes) pt->pt_dir;
	physaddr = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#if defined(__i386__)
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
#elif defined(__arm__)
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER |
		ARM_VM_PTE_CACHED,
#endif
		WMF_OVERWRITE)) != OK) {
		return r;
	}
#if LU_DEBUG
	printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
		viraddr, physaddr);
#endif

	/* Scan all non-reserved page-directory entries. */
	for(pde=0; pde < kern_start_pde; pde++) {
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			continue;
		}

		if(!pt->pt_pt[pde]) { panic("pde %d empty\n", pde); }

		/* Transfer mapping to the page table. */
		viraddr = (vir_bytes) pt->pt_pt[pde];
#if defined(__i386__)
		physaddr = pt->pt_dir[pde] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK;
#endif
		assert(viraddr);
		if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
			ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#ifdef __arm__
			| ARM_VM_PTE_CACHED
#endif
			,
			WMF_OVERWRITE)) != OK) {
			return r;
		}
	}

	return OK;
}

void pt_clearmapcache(void)
{
/* Make sure the kernel invalidates the TLB when it uses the current
 * page table (i.e. VM's) to make new mappings, before the new cr3
 * is loaded.
 */
	if(sys_vmctl(SELF, VMCTL_CLEARMAPCACHE, 0) != OK)
		panic("VMCTL_CLEARMAPCACHE failed");
}

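/* Return nonzero iff the page at the given page-aligned virtual address is
 * mapped writable in the given process's page table; the page is assumed to
 * be present.
 */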
int pt_writable(struct vmproc *vmp, vir_bytes v)
{
	u32_t entry;
	pt_t *pt = &vmp->vm_pt;
	assert(!(v % VM_PAGE_SIZE));
	int pde = ARCH_VM_PDE(v);
	int pte = ARCH_VM_PTE(v);

	assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
	assert(pt->pt_pt[pde]);

	entry = pt->pt_pt[pde][pte];

#if defined(__i386__)
	return((entry & PTF_WRITE) ? 1 : 0);
#elif defined(__arm__)
	return((entry & ARCH_VM_PTE_RO) ? 0 : 1);
#endif
}

/*===========================================================================*
 *				pt_writemap				     *
 *===========================================================================*/
int pt_writemap(struct vmproc * vmp,
	pt_t *pt,
	vir_bytes v,
	phys_bytes physaddr,
	size_t bytes,
	u32_t flags,
	u32_t writemapflags)
{
/* Write mapping into page table. Allocate a new page table if necessary. */
/* Page directory and table entries for this virtual address. */
	int p, pages;
	int verify = 0;
	int ret = OK;

#ifdef CONFIG_SMP
	int vminhibit_clear = 0;
	/* FIXME
	 * Don't do this every time: stop the process only on the first change
	 * and resume execution on the last change. Do it in a wrapper of this
	 * function.
	 */
	if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING)) {
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);
		vminhibit_clear = 1;
	}
#endif

	if(writemapflags & WMF_VERIFY)
		verify = 1;

	assert(!(bytes % VM_PAGE_SIZE));
	assert(!(flags & ~(PTF_ALLFLAGS)));

	pages = bytes / VM_PAGE_SIZE;

	/* MAP_NONE means to clear the mapping. It doesn't matter
	 * what's actually written into the PTE if PRESENT
	 * isn't on, so we can just write MAP_NONE into it.
	 */
	assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT));
	assert(physaddr != MAP_NONE || !flags);

	/* First make sure all the necessary page tables are allocated,
	 * before we start writing in any of them, because it's a pain
	 * to undo our work properly.
	 */
	ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify);
	if(ret != OK) {
		printf("VM: writemap: pt_ptalloc_in_range failed\n");
		goto resume_exit;
	}

	/* Now write in them. */
	for(p = 0; p < pages; p++) {
		u32_t entry;
		int pde = ARCH_VM_PDE(v);
		int pte = ARCH_VM_PTE(v);

		assert(!(v % VM_PAGE_SIZE));
		assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);

		/* We do not expect it to be a bigpage. */
		assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert(pt->pt_pt[pde]);

		if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
#if defined(__i386__)
			physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
			physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
#endif
		}

		if(writemapflags & WMF_FREE) {
			free_mem(ABS2CLICK(physaddr), 1);
		}

		/* Entry we will write. */
#if defined(__i386__)
		entry = (physaddr & ARCH_VM_ADDR_MASK) | flags;
#elif defined(__arm__)
		entry = (physaddr & ARM_VM_PTE_MASK) | flags;
#endif

		if(verify) {
			u32_t maskedentry;
			maskedentry = pt->pt_pt[pde][pte];
#if defined(__i386__)
			maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
#endif
			/* Verify pagetable entry. */
#if defined(__i386__)
			if(entry & ARCH_VM_PTE_RW) {
				/* If we expect a writable page, allow a readonly page. */
				maskedentry |= ARCH_VM_PTE_RW;
			}
#elif defined(__arm__)
			if(!(entry & ARCH_VM_PTE_RO)) {
				/* If we expect a writable page, allow a readonly page. */
				maskedentry &= ~ARCH_VM_PTE_RO;
			}
			maskedentry &= ~(ARM_VM_PTE_WB|ARM_VM_PTE_WT);
#endif
			if(maskedentry != entry) {
				printf("pt_writemap: mismatch: ");
#if defined(__i386__)
				if((entry & ARCH_VM_ADDR_MASK) !=
					(maskedentry & ARCH_VM_ADDR_MASK)) {
#elif defined(__arm__)
				if((entry & ARM_VM_PTE_MASK) !=
					(maskedentry & ARM_VM_PTE_MASK)) {
#endif
					printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
						(long)entry, (long)maskedentry);
				} else printf("phys ok; ");
				printf(" flags: found %s; ",
					ptestr(pt->pt_pt[pde][pte]));
				printf(" masked %s; ",
					ptestr(maskedentry));
				printf(" expected %s\n", ptestr(entry));
				printf("found 0x%x, wanted 0x%x\n",
					pt->pt_pt[pde][pte], entry);
				ret = EFAULT;
				goto resume_exit;
			}
		} else {
			/* Write pagetable entry. */
			pt->pt_pt[pde][pte] = entry;
		}

		physaddr += VM_PAGE_SIZE;
		v += VM_PAGE_SIZE;
	}

resume_exit:

#ifdef CONFIG_SMP
	if (vminhibit_clear) {
		assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING));
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);
	}
#endif

	return ret;
}

/*===========================================================================*
 *				pt_checkrange				     *
 *===========================================================================*/
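/* Check that the given range of virtual addresses is mapped present (and
 * writable, if 'write' is set) in the page table, without changing anything.
 */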
int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes,
	int write)
{
	int p, pages;

	assert(!(bytes % VM_PAGE_SIZE));

	pages = bytes / VM_PAGE_SIZE;

	for(p = 0; p < pages; p++) {
		int pde = ARCH_VM_PDE(v);
		int pte = ARCH_VM_PTE(v);

		assert(!(v % VM_PAGE_SIZE));
		assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT))
			return EFAULT;

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]);

		if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			return EFAULT;
		}

#if defined(__i386__)
		if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
#elif defined(__arm__)
		if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
#endif
			return EFAULT;
		}

		v += VM_PAGE_SIZE;
	}

	return OK;
}

/*===========================================================================*
 *				pt_new					     *
 *===========================================================================*/
int pt_new(pt_t *pt)
{
/* Allocate a pagetable root: a page-aligned page directory with all of
 * its entries set to 0 (indicating no page tables are allocated). Look up
 * its physical address, as we'll need that in the future, and verify that
 * it is page-aligned.
 */
	int i, r;

	/* Don't ever re-allocate/re-move a certain process slot's
	 * page directory once it's been created. This is a fraction
	 * faster, but also avoids having to invalidate the page
	 * mappings from in-kernel page tables pointing to
	 * the page directories (the page_directories data).
	 */
	if(!pt->pt_dir &&
		!(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys,
			VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) {
		return ENOMEM;
	}

	assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) {
		pt->pt_dir[i] = 0;	/* invalid entry (PRESENT bit = 0) */
		pt->pt_pt[i] = NULL;
	}

	/* Where to start looking for free virtual address space? */
	pt->pt_virtop = 0;

	/* Map in kernel. */
	if((r=pt_mapkernel(pt)) != OK)
		return r;

	return OK;
}

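/* Hand out the next PDE slot from the kernel-provided free range in
 * kernel_boot_info; the assert guards against running past the end of the
 * page directory.
 */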
static int freepde(void)
{
	int p = kernel_boot_info.freepde_start++;
	assert(kernel_boot_info.freepde_start < ARCH_VM_DIR_ENTRIES);
	return p;
}

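/* For each of the MAX_PAGEDIR_PDES slots, reserve a PDE, allocate a page
 * table in which the page directory pointers can be remembered, and
 * precompute the PDE value that maps that page table.
 */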
void pt_allocate_kernel_mapped_pagetables(void)
{
/* Reserve PDEs available for mapping in the page directories. */
	int pd;
	for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
		struct pdm *pdm = &pagedir_mappings[pd];
		if(!pdm->pdeno) {
			pdm->pdeno = freepde();
			assert(pdm->pdeno);
		}
		phys_bytes ph;

		/* Allocate us a page table in which to
		 * remember page directory pointers.
		 */
		if(!(pdm->page_directories =
			vm_allocpage(&ph, VMP_PAGETABLE))) {
			panic("no virt addr for vm mappings");
		}
		memset(pdm->page_directories, 0, VM_PAGE_SIZE);
		pdm->phys = ph;

#if defined(__i386__)
		pdm->val = (ph & ARCH_VM_ADDR_MASK) |
			ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
		pdm->val = (ph & ARCH_VM_PDE_MASK)
			| ARCH_VM_PDE_PRESENT
			| ARM_VM_PTE_CACHED
			| ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
	}
}

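/* Copy the user-space part of a page table: for every present, non-bigpage
 * PDE below the kernel area, allocate a fresh page table in 'dst' and copy
 * the corresponding source entries into it.
 */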
static void pt_copy(pt_t *dst, pt_t *src)
{
	int pde;
	for(pde=0; pde < kern_start_pde; pde++) {
		if(!(src->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			continue;
		}
		assert(!(src->pt_dir[pde] & ARCH_VM_BIGPAGE));
		if(!src->pt_pt[pde]) { panic("pde %d empty\n", pde); }
		if(pt_ptalloc(dst, pde, 0) != OK)
			panic("pt_ptalloc failed");
		memcpy(dst->pt_pt[pde], src->pt_pt[pde],
			ARCH_VM_PT_ENTRIES * sizeof(*dst->pt_pt[pde]));
	}
}

/*===========================================================================*
 *				pt_init					     *
 *===========================================================================*/
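/* Initialize VM's own paging state: find where the kernel was loaded,
 * register the statically allocated spare pages (and, on ARM, spare page
 * directories) together with their physical addresses, detect i386 paging
 * features, and set up the kernel's own mappings.
 */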
1088433d6423SLionel Sambuc void pt_init(void)
1089433d6423SLionel Sambuc {
1090*683f1fcaSBen Gras pt_t *newpt, newpt_dyn;
1091433d6423SLionel Sambuc int s, r, p;
109210e6ba68SBen Gras phys_bytes phys;
1093433d6423SLionel Sambuc vir_bytes sparepages_mem;
1094433d6423SLionel Sambuc #if defined(__arm__)
1095433d6423SLionel Sambuc vir_bytes sparepagedirs_mem;
1096433d6423SLionel Sambuc #endif
1097433d6423SLionel Sambuc static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
1098433d6423SLionel Sambuc int m = kernel_boot_info.kern_mod;
1099433d6423SLionel Sambuc #if defined(__i386__)
1100433d6423SLionel Sambuc int global_bit_ok = 0;
1101433d6423SLionel Sambuc u32_t mypdbr; /* Page Directory Base Register (cr3) value */
1102433d6423SLionel Sambuc #elif defined(__arm__)
1103433d6423SLionel Sambuc u32_t myttbr;
1104433d6423SLionel Sambuc #endif
1105433d6423SLionel Sambuc
1106433d6423SLionel Sambuc /* Find what the physical location of the kernel is. */
1107433d6423SLionel Sambuc assert(m >= 0);
1108433d6423SLionel Sambuc assert(m < kernel_boot_info.mods_with_kernel);
1109433d6423SLionel Sambuc assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
1110433d6423SLionel Sambuc kern_mb_mod = &kernel_boot_info.module_list[m];
1111433d6423SLionel Sambuc kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
1112433d6423SLionel Sambuc assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
1113433d6423SLionel Sambuc assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
1114433d6423SLionel Sambuc kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;
1115433d6423SLionel Sambuc
1116433d6423SLionel Sambuc /* Get ourselves spare pages. */
1117433d6423SLionel Sambuc sparepages_mem = (vir_bytes) static_sparepages;
1118433d6423SLionel Sambuc assert(!(sparepages_mem % VM_PAGE_SIZE));
1119433d6423SLionel Sambuc
1120433d6423SLionel Sambuc #if defined(__arm__)
1121433d6423SLionel Sambuc /* Get ourselves spare pagedirs. */
1122433d6423SLionel Sambuc sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
1123433d6423SLionel Sambuc assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
1124433d6423SLionel Sambuc #endif
1125433d6423SLionel Sambuc
1126433d6423SLionel Sambuc /* Spare pages are used to allocate memory before VM has its own page
1127433d6423SLionel Sambuc  * table into which things (i.e. arbitrary physical memory) can be mapped.
1128433d6423SLionel Sambuc  * We get them by pre-allocating them in our bss (allocated and mapped in
1129433d6423SLionel Sambuc  * by the kernel) in static_sparepages. We also need the physical
1130433d6423SLionel Sambuc  * addresses, though; we look them up now so they are ready for use.
1131433d6423SLionel Sambuc  */
1132433d6423SLionel Sambuc #if defined(__arm__)
1133433d6423SLionel Sambuc missing_sparedirs = 0;
1134433d6423SLionel Sambuc assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
1135433d6423SLionel Sambuc for(s = 0; s < SPAREPAGEDIRS; s++) {
1136433d6423SLionel Sambuc vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);
1137433d6423SLionel Sambuc phys_bytes ph;
1138433d6423SLionel Sambuc if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
1139433d6423SLionel Sambuc ARCH_PAGEDIR_SIZE, &ph)) != OK)
1140433d6423SLionel Sambuc panic("pt_init: sys_umap failed: %d", r);
1141433d6423SLionel Sambuc if(s >= STATIC_SPAREPAGEDIRS) {
1142433d6423SLionel Sambuc sparepagedirs[s].pagedir = NULL;
1143433d6423SLionel Sambuc missing_sparedirs++;
1144433d6423SLionel Sambuc continue;
1145433d6423SLionel Sambuc }
1146433d6423SLionel Sambuc sparepagedirs[s].pagedir = (void *) v;
1147433d6423SLionel Sambuc sparepagedirs[s].phys = ph;
1148433d6423SLionel Sambuc }
1149433d6423SLionel Sambuc #endif
1150433d6423SLionel Sambuc
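/* Create the reserved queue of single spare pages and seed it with the
 * statically allocated ones below; once VM can allocate dynamically, the
 * queue is drained and refilled from regular memory (see the alloc_cycle()
 * calls near the end of pt_init()).
 */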
1151433d6423SLionel Sambuc if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
1152433d6423SLionel Sambuc panic("reservedqueue_new for single pages failed");
1153433d6423SLionel Sambuc
1154433d6423SLionel Sambuc assert(STATIC_SPAREPAGES < SPAREPAGES);
1155433d6423SLionel Sambuc for(s = 0; s < STATIC_SPAREPAGES; s++) {
1156433d6423SLionel Sambuc void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
1157433d6423SLionel Sambuc phys_bytes ph;
1158433d6423SLionel Sambuc if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
1159433d6423SLionel Sambuc VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
1160433d6423SLionel Sambuc panic("pt_init: sys_umap failed: %d", r);
1161433d6423SLionel Sambuc reservedqueue_add(spare_pagequeue, v, ph);
1162433d6423SLionel Sambuc }
1163433d6423SLionel Sambuc
1164433d6423SLionel Sambuc #if defined(__i386__)
1165433d6423SLionel Sambuc /* global bit and 4MB pages available? */
1166433d6423SLionel Sambuc global_bit_ok = _cpufeature(_CPUF_I386_PGE);
1167433d6423SLionel Sambuc bigpage_ok = _cpufeature(_CPUF_I386_PSE);
1168433d6423SLionel Sambuc
1169433d6423SLionel Sambuc /* Set bit for PTE's and PDE's if available. */
1170433d6423SLionel Sambuc if(global_bit_ok)
1171433d6423SLionel Sambuc global_bit = I386_VM_GLOBAL;
1172433d6423SLionel Sambuc #endif
1173433d6423SLionel Sambuc
1174433d6423SLionel Sambuc /* Now reserve another pde for kernel's own mappings. */
1175433d6423SLionel Sambuc {
1176433d6423SLionel Sambuc int kernmap_pde;
1177433d6423SLionel Sambuc phys_bytes addr, len;
1178433d6423SLionel Sambuc int flags, pindex = 0;
1179433d6423SLionel Sambuc u32_t offset = 0;
1180433d6423SLionel Sambuc
1181433d6423SLionel Sambuc kernmap_pde = freepde();
1182433d6423SLionel Sambuc offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;
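/* 'offset' tracks the virtual address at which the kernel's extra mappings
 * are placed; it starts at the base of the freshly reserved pde and advances
 * past each mapping. pt_mapkernel() later writes these mappings into every
 * page table.
 */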
1183433d6423SLionel Sambuc
1184433d6423SLionel Sambuc while(sys_vmctl_get_mapping(pindex, &addr, &len,
1185433d6423SLionel Sambuc &flags) == OK) {
1186433d6423SLionel Sambuc int usedpde;
1187433d6423SLionel Sambuc vir_bytes vir;
1188433d6423SLionel Sambuc if(pindex >= MAX_KERNMAPPINGS)
1189433d6423SLionel Sambuc panic("VM: too many kernel mappings: %d", pindex);
1190433d6423SLionel Sambuc kern_mappings[pindex].phys_addr = addr;
1191433d6423SLionel Sambuc kern_mappings[pindex].len = len;
1192433d6423SLionel Sambuc kern_mappings[pindex].flags = flags;
1193433d6423SLionel Sambuc kern_mappings[pindex].vir_addr = offset;
1194433d6423SLionel Sambuc kern_mappings[pindex].flags =
1195433d6423SLionel Sambuc ARCH_VM_PTE_PRESENT;
1196433d6423SLionel Sambuc if(flags & VMMF_UNCACHED)
1197433d6423SLionel Sambuc #if defined(__i386__)
1198433d6423SLionel Sambuc kern_mappings[pindex].flags |= PTF_NOCACHE;
1199433d6423SLionel Sambuc #elif defined(__arm__)
1200433d6423SLionel Sambuc kern_mappings[pindex].flags |= ARM_VM_PTE_DEVICE;
1201433d6423SLionel Sambuc else {
1202433d6423SLionel Sambuc kern_mappings[pindex].flags |= ARM_VM_PTE_CACHED;
1203433d6423SLionel Sambuc }
1204433d6423SLionel Sambuc #endif
1205433d6423SLionel Sambuc if(flags & VMMF_USER)
1206433d6423SLionel Sambuc kern_mappings[pindex].flags |= ARCH_VM_PTE_USER;
1207433d6423SLionel Sambuc #if defined(__arm__)
1208433d6423SLionel Sambuc else
1209433d6423SLionel Sambuc kern_mappings[pindex].flags |= ARM_VM_PTE_SUPER;
1210433d6423SLionel Sambuc #endif
1211433d6423SLionel Sambuc if(flags & VMMF_WRITE)
1212433d6423SLionel Sambuc kern_mappings[pindex].flags |= ARCH_VM_PTE_RW;
1213433d6423SLionel Sambuc #if defined(__arm__)
1214433d6423SLionel Sambuc else
1215433d6423SLionel Sambuc kern_mappings[pindex].flags |= ARCH_VM_PTE_RO;
1216433d6423SLionel Sambuc #endif
1217433d6423SLionel Sambuc
1218433d6423SLionel Sambuc #if defined(__i386__)
1219433d6423SLionel Sambuc if(flags & VMMF_GLO)
1220433d6423SLionel Sambuc kern_mappings[pindex].flags |= I386_VM_GLOBAL;
1221433d6423SLionel Sambuc #endif
1222433d6423SLionel Sambuc
1223433d6423SLionel Sambuc if(addr % VM_PAGE_SIZE)
1224433d6423SLionel Sambuc panic("VM: addr unaligned: %lu", addr);
1225433d6423SLionel Sambuc if(len % VM_PAGE_SIZE)
1226433d6423SLionel Sambuc panic("VM: len unaligned: %lu", len);
1227433d6423SLionel Sambuc vir = offset;
1228433d6423SLionel Sambuc if(sys_vmctl_reply_mapping(pindex, vir) != OK)
1229433d6423SLionel Sambuc panic("VM: reply failed");
1230433d6423SLionel Sambuc offset += len;
1231433d6423SLionel Sambuc pindex++;
1232433d6423SLionel Sambuc kernmappings++;
1233433d6423SLionel Sambuc
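/* If the mappings have grown past the current pde's big page, reserve
 * the following pde(s) as well so the kernel mapping window stays
 * virtually contiguous.
 */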
1234433d6423SLionel Sambuc usedpde = ARCH_VM_PDE(offset);
1235433d6423SLionel Sambuc while(usedpde > kernmap_pde) {
1236433d6423SLionel Sambuc int newpde = freepde();
1237433d6423SLionel Sambuc assert(newpde == kernmap_pde+1);
1238433d6423SLionel Sambuc kernmap_pde = newpde;
1239433d6423SLionel Sambuc }
1240433d6423SLionel Sambuc }
1241433d6423SLionel Sambuc }
1242433d6423SLionel Sambuc
124310e6ba68SBen Gras pt_allocate_kernel_mapped_pagetables();
1244433d6423SLionel Sambuc
1245433d6423SLionel Sambuc /* All right. Now we have to make our own page directory and page tables,
1246433d6423SLionel Sambuc  * which the kernel has already set up, accessible to us. It's easier to
1247433d6423SLionel Sambuc  * understand if we just copy all the required pages (i.e. page directory
1248433d6423SLionel Sambuc  * and page tables) and set up the pointers as if VM had done it itself.
1249433d6423SLionel Sambuc  *
1250433d6423SLionel Sambuc  * This allocation happens without using any page table, and just
1251433d6423SLionel Sambuc  * uses spare pages.
1252433d6423SLionel Sambuc  */
1253433d6423SLionel Sambuc newpt = &vmprocess->vm_pt;
1254433d6423SLionel Sambuc if(pt_new(newpt) != OK)
1255433d6423SLionel Sambuc panic("vm pt_new failed");
1256433d6423SLionel Sambuc
1257433d6423SLionel Sambuc /* Get our current pagedir so we can see it. */
1258433d6423SLionel Sambuc #if defined(__i386__)
1259433d6423SLionel Sambuc if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
1260433d6423SLionel Sambuc #elif defined(__arm__)
1261433d6423SLionel Sambuc if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
1262433d6423SLionel Sambuc #endif
1263433d6423SLionel Sambuc
1264433d6423SLionel Sambuc panic("VM: sys_vmctl_get_pdbr failed");
1265433d6423SLionel Sambuc #if defined(__i386__)
1266433d6423SLionel Sambuc if(sys_vircopy(NONE, mypdbr, SELF,
1267433d6423SLionel Sambuc (vir_bytes) currentpagedir, VM_PAGE_SIZE, 0) != OK)
1268433d6423SLionel Sambuc #elif defined(__arm__)
1269433d6423SLionel Sambuc if(sys_vircopy(NONE, myttbr, SELF,
1270433d6423SLionel Sambuc (vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE, 0) != OK)
1271433d6423SLionel Sambuc #endif
1272433d6423SLionel Sambuc panic("VM: sys_vircopy failed");
1273433d6423SLionel Sambuc
1274433d6423SLionel Sambuc /* We have mapped in kernel ourselves; now copy mappings for VM
1275433d6423SLionel Sambuc * that kernel made, including allocations for BSS. Skip identity
1276433d6423SLionel Sambuc * mapping bits; just map in VM.
1277433d6423SLionel Sambuc */
1278433d6423SLionel Sambuc for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
1279433d6423SLionel Sambuc u32_t entry = currentpagedir[p];
1280433d6423SLionel Sambuc phys_bytes ptaddr_kern, ptaddr_us;
1281433d6423SLionel Sambuc
1282433d6423SLionel Sambuc /* BIGPAGEs are kernel mapping (do ourselves) or boot
1283433d6423SLionel Sambuc * identity mapping (don't want).
1284433d6423SLionel Sambuc */
1285433d6423SLionel Sambuc if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
1286433d6423SLionel Sambuc if((entry & ARCH_VM_BIGPAGE)) continue;
1287433d6423SLionel Sambuc
1288433d6423SLionel Sambuc if(pt_ptalloc(newpt, p, 0) != OK)
1289433d6423SLionel Sambuc panic("pt_ptalloc failed");
1290433d6423SLionel Sambuc assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);
1291433d6423SLionel Sambuc
1292433d6423SLionel Sambuc #if defined(__i386__)
1293433d6423SLionel Sambuc ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
1294433d6423SLionel Sambuc ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
1295433d6423SLionel Sambuc #elif defined(__arm__)
1296433d6423SLionel Sambuc ptaddr_kern = entry & ARCH_VM_PDE_MASK;
1297433d6423SLionel Sambuc ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
1298433d6423SLionel Sambuc #endif
1299433d6423SLionel Sambuc
1300433d6423SLionel Sambuc /* Copy kernel-initialized pagetable contents into our
1301433d6423SLionel Sambuc * normally accessible pagetable.
1302433d6423SLionel Sambuc */
1303433d6423SLionel Sambuc if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
1304433d6423SLionel Sambuc panic("pt_init: abscopy failed");
1305433d6423SLionel Sambuc }
1306433d6423SLionel Sambuc
1307433d6423SLionel Sambuc /* Inform kernel vm has a newly built page table. */
1308433d6423SLionel Sambuc assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
1309433d6423SLionel Sambuc pt_bind(newpt, &vmproc[VM_PROC_NR]);
1310433d6423SLionel Sambuc
1311433d6423SLionel Sambuc pt_init_done = 1;
1312433d6423SLionel Sambuc
131310e6ba68SBen Gras /* VM is now fully functional in that it can dynamically allocate memory
131410e6ba68SBen Gras * for itself.
131510e6ba68SBen Gras *
131610e6ba68SBen Gras * We don't want to keep using the bootstrap statically allocated spare
131710e6ba68SBen Gras * pages though, as the physical addresses will change on liveupdate. So we
131810e6ba68SBen Gras * re-do part of the initialization now with purely dynamically allocated
131910e6ba68SBen Gras * memory. First throw out the static pool.
1320*683f1fcaSBen Gras *
1321*683f1fcaSBen Gras * Then allocate the kernel-shared-pagetables and VM pagetables with dynamic
1322*683f1fcaSBen Gras * memory.
132310e6ba68SBen Gras */
132410e6ba68SBen Gras
132510e6ba68SBen Gras alloc_cycle(); /* Make sure allocating works */
132610e6ba68SBen Gras while(vm_getsparepage(&phys)) ; /* Use up all static pages */
132710e6ba68SBen Gras alloc_cycle(); /* Refill spares with dynamic */
132810e6ba68SBen Gras pt_allocate_kernel_mapped_pagetables(); /* Reallocate in-kernel pages */
132910e6ba68SBen Gras pt_bind(newpt, &vmproc[VM_PROC_NR]); /* Recalculate */
133010e6ba68SBen Gras pt_mapkernel(newpt); /* Rewrite pagetable info */
133110e6ba68SBen Gras
133210e6ba68SBen Gras /* Flush TLB just in case any of those mappings have been touched */
133310e6ba68SBen Gras if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
133410e6ba68SBen Gras panic("VMCTL_FLUSHTLB failed");
133510e6ba68SBen Gras }
133610e6ba68SBen Gras
1337*683f1fcaSBen Gras /* Recreate VM page table with dynamic-only allocations */
1338*683f1fcaSBen Gras memset(&newpt_dyn, 0, sizeof(newpt_dyn));
1339*683f1fcaSBen Gras pt_new(&newpt_dyn);
1340*683f1fcaSBen Gras pt_copy(&newpt_dyn, newpt);
1341*683f1fcaSBen Gras memcpy(newpt, &newpt_dyn, sizeof(*newpt));
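/* From this point on, vmprocess->vm_pt (newpt) refers to the purely
 * dynamically allocated copy of VM's page table.
 */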
1342*683f1fcaSBen Gras
1343*683f1fcaSBen Gras pt_bind(newpt, &vmproc[VM_PROC_NR]); /* Recalculate */
1344*683f1fcaSBen Gras pt_mapkernel(newpt); /* Rewrite pagetable info */
1345*683f1fcaSBen Gras
1346*683f1fcaSBen Gras /* Flush TLB just in case any of those mappings have been touched */
1347*683f1fcaSBen Gras if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
1348*683f1fcaSBen Gras panic("VMCTL_FLUSHTLB failed");
1349*683f1fcaSBen Gras }
1350*683f1fcaSBen Gras
1351433d6423SLionel Sambuc /* All OK. */
1352433d6423SLionel Sambuc return;
1353433d6423SLionel Sambuc }
1354433d6423SLionel Sambuc
1355433d6423SLionel Sambuc /*===========================================================================*
1356433d6423SLionel Sambuc * pt_bind *
1357433d6423SLionel Sambuc *===========================================================================*/
1358433d6423SLionel Sambuc int pt_bind(pt_t *pt, struct vmproc *who)
1359433d6423SLionel Sambuc {
1360433d6423SLionel Sambuc int procslot, pdeslot;
1361433d6423SLionel Sambuc u32_t phys;
1362433d6423SLionel Sambuc void *pdes;
1363433d6423SLionel Sambuc int pagedir_pde;
1364433d6423SLionel Sambuc int slots_per_pde;
1365433d6423SLionel Sambuc int pages_per_pagedir = ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE;
1366433d6423SLionel Sambuc struct pdm *pdm;
1367433d6423SLionel Sambuc
1368433d6423SLionel Sambuc slots_per_pde = ARCH_VM_PT_ENTRIES / pages_per_pagedir;
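/* Each pdm covers one pde worth of address space and holds the page
 * directories of slots_per_pde processes, pages_per_pagedir pages per
 * process. For instance, on i386 a page directory is a single page, so
 * pages_per_pagedir is 1 and each process uses one slot; on ARM the
 * first-level table spans multiple pages, so each process consumes
 * pages_per_pagedir consecutive entries (see the per-page loop below).
 */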
1369433d6423SLionel Sambuc
1370433d6423SLionel Sambuc /* Basic sanity checks. */
1371433d6423SLionel Sambuc assert(who);
1372433d6423SLionel Sambuc assert(who->vm_flags & VMF_INUSE);
1373433d6423SLionel Sambuc assert(pt);
1374433d6423SLionel Sambuc
1375433d6423SLionel Sambuc procslot = who->vm_slot;
1376433d6423SLionel Sambuc pdm = &pagedir_mappings[procslot/slots_per_pde];
1377433d6423SLionel Sambuc pdeslot = procslot%slots_per_pde;
1378433d6423SLionel Sambuc pagedir_pde = pdm->pdeno;
1379433d6423SLionel Sambuc assert(pdeslot >= 0);
1380433d6423SLionel Sambuc assert(procslot < ELEMENTS(vmproc));
1381433d6423SLionel Sambuc assert(pdeslot < ARCH_VM_PT_ENTRIES / pages_per_pagedir);
1382433d6423SLionel Sambuc assert(pagedir_pde >= 0);
1383433d6423SLionel Sambuc
1384433d6423SLionel Sambuc #if defined(__i386__)
1385433d6423SLionel Sambuc phys = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
1386433d6423SLionel Sambuc #elif defined(__arm__)
1387433d6423SLionel Sambuc phys = pt->pt_dir_phys & ARM_VM_PTE_MASK;
1388433d6423SLionel Sambuc #endif
1389433d6423SLionel Sambuc assert(pt->pt_dir_phys == phys);
1390433d6423SLionel Sambuc assert(!(pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));
1391433d6423SLionel Sambuc
1392433d6423SLionel Sambuc /* Update "page directory pagetable." */
1393433d6423SLionel Sambuc #if defined(__i386__)
1394433d6423SLionel Sambuc pdm->page_directories[pdeslot] =
1395433d6423SLionel Sambuc phys | ARCH_VM_PDE_PRESENT|ARCH_VM_PTE_RW;
1396433d6423SLionel Sambuc #elif defined(__arm__)
1397433d6423SLionel Sambuc {
1398433d6423SLionel Sambuc int i;
1399433d6423SLionel Sambuc for (i = 0; i < pages_per_pagedir; i++) {
1400433d6423SLionel Sambuc pdm->page_directories[pdeslot*pages_per_pagedir+i] =
1401433d6423SLionel Sambuc (phys+i*VM_PAGE_SIZE)
1402433d6423SLionel Sambuc | ARCH_VM_PTE_PRESENT
1403433d6423SLionel Sambuc | ARCH_VM_PTE_RW
1404433d6423SLionel Sambuc | ARM_VM_PTE_CACHED
1405433d6423SLionel Sambuc | ARCH_VM_PTE_USER; //LSC FIXME
1406433d6423SLionel Sambuc }
1407433d6423SLionel Sambuc }
1408433d6423SLionel Sambuc #endif
1409433d6423SLionel Sambuc
1410433d6423SLionel Sambuc /* This is where the PDE's will be visible to the kernel
1411433d6423SLionel Sambuc * in its address space.
1412433d6423SLionel Sambuc */
1413433d6423SLionel Sambuc pdes = (void *) (pagedir_pde*ARCH_BIG_PAGE_SIZE +
1414433d6423SLionel Sambuc #if defined(__i386__)
1415433d6423SLionel Sambuc pdeslot * VM_PAGE_SIZE);
1416433d6423SLionel Sambuc #elif defined(__arm__)
1417433d6423SLionel Sambuc pdeslot * ARCH_PAGEDIR_SIZE);
1418433d6423SLionel Sambuc #endif
1419433d6423SLionel Sambuc
1420433d6423SLionel Sambuc /* Tell kernel about new page table root. */
1421433d6423SLionel Sambuc return sys_vmctl_set_addrspace(who->vm_endpoint, pt->pt_dir_phys, pdes);
1422433d6423SLionel Sambuc }
1423433d6423SLionel Sambuc
1424433d6423SLionel Sambuc /*===========================================================================*
1425433d6423SLionel Sambuc * pt_free *
1426433d6423SLionel Sambuc *===========================================================================*/
1427433d6423SLionel Sambuc void pt_free(pt_t *pt)
1428433d6423SLionel Sambuc {
1429433d6423SLionel Sambuc /* Free memory associated with this pagetable. */
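/* Note that only the page tables (pt_pt) are released here; the page
 * directory pages themselves are not freed by this function.
 */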
1430433d6423SLionel Sambuc int i;
1431433d6423SLionel Sambuc
1432433d6423SLionel Sambuc for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++)
1433433d6423SLionel Sambuc if(pt->pt_pt[i])
1434433d6423SLionel Sambuc vm_freepages((vir_bytes) pt->pt_pt[i], 1);
1435433d6423SLionel Sambuc
1436433d6423SLionel Sambuc return;
1437433d6423SLionel Sambuc }
1438433d6423SLionel Sambuc
1439433d6423SLionel Sambuc /*===========================================================================*
1440433d6423SLionel Sambuc * pt_mapkernel *
1441433d6423SLionel Sambuc *===========================================================================*/
1442433d6423SLionel Sambuc int pt_mapkernel(pt_t *pt)
1443433d6423SLionel Sambuc {
1444433d6423SLionel Sambuc int i;
1445433d6423SLionel Sambuc int kern_pde = kern_start_pde;
1446433d6423SLionel Sambuc phys_bytes addr, mapped = 0;
1447433d6423SLionel Sambuc
1448433d6423SLionel Sambuc /* Any page table needs to map in the kernel address space. */
1449433d6423SLionel Sambuc assert(bigpage_ok);
1450433d6423SLionel Sambuc assert(kern_pde >= 0);
1451433d6423SLionel Sambuc
1452433d6423SLionel Sambuc /* pt_init() has made sure this is ok: the kernel module's start address is big-page aligned. */
1453433d6423SLionel Sambuc addr = kern_mb_mod->mod_start;
1454433d6423SLionel Sambuc
1455433d6423SLionel Sambuc /* Actually mapping in kernel */
1456433d6423SLionel Sambuc while(mapped < kern_size) {
1457433d6423SLionel Sambuc #if defined(__i386__)
1458433d6423SLionel Sambuc pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
1459433d6423SLionel Sambuc ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
1460433d6423SLionel Sambuc #elif defined(__arm__)
1461433d6423SLionel Sambuc pt->pt_dir[kern_pde] = (addr & ARM_VM_SECTION_MASK)
1462433d6423SLionel Sambuc | ARM_VM_SECTION
1463433d6423SLionel Sambuc | ARM_VM_SECTION_DOMAIN
1464433d6423SLionel Sambuc | ARM_VM_SECTION_CACHED
1465433d6423SLionel Sambuc | ARM_VM_SECTION_SUPER;
1466433d6423SLionel Sambuc #endif
1467433d6423SLionel Sambuc kern_pde++;
1468433d6423SLionel Sambuc mapped += ARCH_BIG_PAGE_SIZE;
1469433d6423SLionel Sambuc addr += ARCH_BIG_PAGE_SIZE;
1470433d6423SLionel Sambuc }
1471433d6423SLionel Sambuc
1472433d6423SLionel Sambuc /* Kernel also wants to know about all page directories. */
1473433d6423SLionel Sambuc {
1474433d6423SLionel Sambuc int pd;
1475433d6423SLionel Sambuc for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
1476433d6423SLionel Sambuc struct pdm *pdm = &pagedir_mappings[pd];
1477433d6423SLionel Sambuc
1478433d6423SLionel Sambuc assert(pdm->pdeno > 0);
1479433d6423SLionel Sambuc assert(pdm->pdeno > kern_pde);
1480433d6423SLionel Sambuc pt->pt_dir[pdm->pdeno] = pdm->val;
1481433d6423SLionel Sambuc }
1482433d6423SLionel Sambuc }
1483433d6423SLionel Sambuc
1484433d6423SLionel Sambuc /* Kernel also wants various mappings of its own. */
1485433d6423SLionel Sambuc for(i = 0; i < kernmappings; i++) {
1486433d6423SLionel Sambuc int r;
1487433d6423SLionel Sambuc if((r=pt_writemap(NULL, pt,
1488433d6423SLionel Sambuc kern_mappings[i].vir_addr,
1489433d6423SLionel Sambuc kern_mappings[i].phys_addr,
1490433d6423SLionel Sambuc kern_mappings[i].len,
1491433d6423SLionel Sambuc kern_mappings[i].flags, 0)) != OK) {
1492433d6423SLionel Sambuc return r;
1493433d6423SLionel Sambuc }
1494433d6423SLionel Sambuc
1495433d6423SLionel Sambuc }
1496433d6423SLionel Sambuc
1497433d6423SLionel Sambuc return OK;
1498433d6423SLionel Sambuc }
1499433d6423SLionel Sambuc
1500433d6423SLionel Sambuc int get_vm_self_pages(void) { return vm_self_pages; }
1501