
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>
#include <minix/hash.h>
#include <machine/multiboot.h>

#include <sys/mman.h>

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <sys/param.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "region.h"
#include "sanitycheck.h"
#include "memlist.h"
#include "memtype.h"
#include "regionavl.h"

static struct vir_region *map_copy_region(struct vmproc *vmp, struct
	vir_region *vr);

void map_region_init(void)
{
}

static void map_printregion(struct vir_region *vr)
{
	unsigned int i;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", vr->def_memtype->name);
	printf("\t%lx (len 0x%lx, %lukB), %p, %s\n",
		vr->vaddr, vr->length, vr->length/1024,
		vr->def_memtype->name,
		(vr->flags & VR_WRITABLE) ? "writable" : "readonly");
	printf("\t\tphysblocks:\n");
	for(i = 0; i < vr->length/VM_PAGE_SIZE; i++) {
		if(!(ph=vr->physblocks[i])) continue;
		printf("\t\t@ %lx (refs %d): phys 0x%lx, %s\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys,
			pt_writable(vr->parent, vr->vaddr + ph->offset) ? "W" : "R");
	}
}

struct phys_region *physblock_get(struct vir_region *region, vir_bytes offset)
{
	int i;
	struct phys_region *foundregion;
	assert(!(offset % VM_PAGE_SIZE));
	assert( /* offset >= 0 && */ offset < region->length);
	i = offset/VM_PAGE_SIZE;
	if((foundregion = region->physblocks[i]))
		assert(foundregion->offset == offset);
	return foundregion;
}

void physblock_set(struct vir_region *region, vir_bytes offset,
	struct phys_region *newphysr)
{
	int i;
	struct vmproc *proc;
	assert(!(offset % VM_PAGE_SIZE));
	assert( /* offset >= 0 && */ offset < region->length);
	i = offset/VM_PAGE_SIZE;
	proc = region->parent;
	assert(proc);
	if(newphysr) {
		assert(!region->physblocks[i]);
		assert(newphysr->offset == offset);
		proc->vm_total += VM_PAGE_SIZE;
		if (proc->vm_total > proc->vm_total_max)
			proc->vm_total_max = proc->vm_total;
	} else {
		assert(region->physblocks[i]);
		proc->vm_total -= VM_PAGE_SIZE;
	}
	region->physblocks[i] = newphysr;
}
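
/*
 * Editor's note (sketch, not part of the original source): the physblocks
 * array is indexed by page slot, i.e. offset/VM_PAGE_SIZE. With 4kB pages,
 * offset 0x3000 into a region lands in slot 3:
 *
 *	offset:	0x0000	0x1000	0x2000	0x3000
 *	slot:	0	1	2	3
 *
 * Both physblock_get() and physblock_set() assert that the offset is
 * page-aligned and within the region before doing this division.
 */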

/*===========================================================================*
 *				map_printmap				     *
 *===========================================================================*/
void map_printmap(struct vmproc *vmp)
{
	struct vir_region *vr;
	region_iter iter;

	printf("memory regions in process %d:\n", vmp->vm_endpoint);

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vr);
		region_incr_iter(&iter);
	}
}

static struct vir_region *getnextvr(struct vir_region *vr)
{
	struct vir_region *nextvr;
	region_iter v_iter;
	SLABSANE(vr);
	region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter));
	assert(region_get_iter(&v_iter) == vr);
	region_incr_iter(&v_iter);
	nextvr = region_get_iter(&v_iter);
	if(!nextvr) return NULL;
	SLABSANE(nextvr);
	assert(vr->parent == nextvr->parent);
	assert(vr->vaddr < nextvr->vaddr);
	assert(vr->vaddr + vr->length <= nextvr->vaddr);
	return nextvr;
}

static int pr_writable(struct vir_region *vr, struct phys_region *pr)
{
	assert(pr->memtype->writable);
	return ((vr->flags & VR_WRITABLE) && pr->memtype->writable(pr));
}

#if SANITYCHECKS

/*===========================================================================*
 *				map_sanitycheck_pt			     *
 *===========================================================================*/
static int map_sanitycheck_pt(struct vmproc *vmp,
	struct vir_region *vr, struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;
	int rw;
	int r;

	if(pr_writable(vr, pr))
		rw = PTF_WRITE;
	else
		rw = PTF_READ;

	r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);

	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vr);
	}

	return r;
}

/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
void map_sanitycheck(const char *file, int line)
{
	struct vmproc *vmp;

	/* Macro for looping over all physical blocks of all regions of
	 * all processes.
	 */
#define ALLREGIONS(regioncode, physcode) \
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) { \
		vir_bytes voffset; \
		region_iter v_iter; \
		struct vir_region *vr; \
		if(!(vmp->vm_flags & VMF_INUSE)) \
			continue; \
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter); \
		while((vr = region_get_iter(&v_iter))) { \
			struct phys_region *pr; \
			regioncode; \
			for(voffset = 0; voffset < vr->length; \
				voffset += VM_PAGE_SIZE) { \
				if(!(pr = physblock_get(vr, voffset))) \
					continue; \
				physcode; \
			} \
			region_incr_iter(&v_iter); \
		} \
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,MYASSERT(pr->offset == voffset););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(pr->memtype->ev_sanitycheck)
				pr->memtype->ev_sanitycheck(pr, file, line);
		}
	);

	/* Do consistency check. */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->flags & PBF_INCACHE) pr->ph->seencount++;
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			if(pr->ph->flags & PBF_INCACHE) n_others++;
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}

#endif

/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int flags = PTF_PRESENT | PTF_USER;
	struct phys_block *pb = pr->ph;

	assert(vr);
	assert(pr);
	assert(pb);

	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(pr_writable(vr, pr))
		flags |= PTF_WRITE;
	else
		flags |= PTF_READ;

	if(vr->def_memtype->pt_flags)
		flags |= vr->def_memtype->pt_flags(vr);

	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
		pb->phys, VM_PAGE_SIZE, flags,
#if SANITYCHECKS
		!pr->written ? 0 :
#endif
		WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

#if SANITYCHECKS
	USE(pr, pr->written = 1;);
#endif

	return OK;
}

#define SLOT_FAIL ((vir_bytes) -1)

/*===========================================================================*
 *				region_find_slot_range			     *
 *===========================================================================*/
static vir_bytes region_find_slot_range(struct vmproc *vmp,
	vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	struct vir_region *lastregion;
	vir_bytes startv = 0;
	int foundflag = 0;
	region_iter iter;

	SANITYCHECK(SCL_FUNCTIONS);

	/* Length must be reasonable. */
	assert(length > 0);

	/* Special case: allow caller to set maxv to 0 meaning 'I want
	 * it to be mapped in right here.'
	 */
	if(maxv == 0) {
		maxv = minv + length;

		/* Sanity check. */
		if(maxv <= minv) {
			printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
				minv, length);
			return SLOT_FAIL;
		}
	}

	/* Basic input sanity checks. */
	assert(!(length % VM_PAGE_SIZE));
	if(minv >= maxv) {
		printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
			minv, maxv, length);
	}

	assert(minv < maxv);

	if(minv + length > maxv)
		return SLOT_FAIL;

#define FREEVRANGE_TRY(rangestart, rangeend) { \
	vir_bytes frstart = (rangestart), frend = (rangeend); \
	frstart = MAX(frstart, minv); \
	frend = MIN(frend, maxv); \
	if(frend > frstart && (frend - frstart) >= length) { \
		startv = frend-length; \
		foundflag = 1; \
	} }

#define FREEVRANGE(start, end) { \
	assert(!foundflag); \
	FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE)); \
	if(!foundflag) { \
		FREEVRANGE_TRY((start), (end)); \
	} \
}

	/* find region after maxv. */
	region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
	lastregion = region_get_iter(&iter);

	if(!lastregion) {
		/* This is the free virtual address space after the last region. */
		region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
		lastregion = region_get_iter(&iter);
		FREEVRANGE(lastregion ?
			lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
	}

	if(!foundflag) {
		struct vir_region *vr;
		while((vr = region_get_iter(&iter)) && !foundflag) {
			struct vir_region *nextvr;
			region_decr_iter(&iter);
			nextvr = region_get_iter(&iter);
			FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
				vr->vaddr);
		}
	}

	if(!foundflag) {
		return SLOT_FAIL;
	}

	/* However we got it, startv must be in the requested range. */
	assert(startv >= minv);
	assert(startv < maxv);
	assert(startv + length <= maxv);

	/* remember this position as a hint for next time. */
	vmp->vm_region_top = startv + length;

	return startv;
}

/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
static vir_bytes region_find_slot(struct vmproc *vmp,
	vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes v, hint = vmp->vm_region_top;

	/* use the top of the last inserted region as a minv hint if
	 * possible. remember that a zero maxv is a special case.
	 */

	if(maxv && hint < maxv && hint >= minv) {
		v = region_find_slot_range(vmp, minv, hint, length);

		if(v != SLOT_FAIL)
			return v;
	}

	return region_find_slot_range(vmp, minv, maxv, length);
}
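
/*
 * Editor's sketch (hypothetical values, not part of the original source):
 * how minv/maxv drive slot selection. FREEVRANGE places the slot at the
 * high end of a free range, and the top of the winning slot is cached in
 * vmp->vm_region_top; region_find_slot() first retries the range below
 * that hint before scanning all of [minv, maxv).
 */
#if 0
static void example_find_slot(struct vmproc *vmp)
{
	vir_bytes v;

	/* Any page-aligned 16kB hole in [0x1000000, 0x2000000). */
	v = region_find_slot(vmp, 0x1000000, 0x2000000, 4*VM_PAGE_SIZE);

	/* maxv == 0 means "map it right here": the range becomes
	 * [minv, minv+length).
	 */
	v = region_find_slot(vmp, 0x1000000, 0, 4*VM_PAGE_SIZE);

	if(v == SLOT_FAIL) {
		/* no suitable hole */
	}
}
#endif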

static unsigned int phys_slot(vir_bytes len)
{
	assert(!(len % VM_PAGE_SIZE));
	return len / VM_PAGE_SIZE;
}

static struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
	int flags, mem_type_t *memtype)
{
	struct vir_region *newregion;
	struct phys_region **newphysregions;
	static u32_t id;
	int slots = phys_slot(length);

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
	USE(newregion,
		memset(newregion, 0, sizeof(*newregion));
		newregion->vaddr = startv;
		newregion->length = length;
		newregion->flags = flags;
		newregion->def_memtype = memtype;
		newregion->remaps = 0;
		newregion->id = id++;
		newregion->lower = newregion->higher = NULL;
		newregion->parent = vmp;);

	if(!(newphysregions = calloc(slots, sizeof(struct phys_region *)))) {
		printf("VM: region_new: allocating phys blocks failed\n");
		SLABFREE(newregion);
		return NULL;
	}

	USE(newregion, newregion->physblocks = newphysregions;);

	return newregion;
}

/*===========================================================================*
 *				map_page_region				     *
 *===========================================================================*/
struct vir_region *map_page_region(struct vmproc *vmp, vir_bytes minv,
	vir_bytes maxv, vir_bytes length, u32_t flags, int mapflags,
	mem_type_t *memtype)
{
	struct vir_region *newregion;
	vir_bytes startv;

	assert(!(length % VM_PAGE_SIZE));

	SANITYCHECK(SCL_FUNCTIONS);

	startv = region_find_slot(vmp, minv, maxv, length);
	if (startv == SLOT_FAIL)
		return NULL;

	/* Now we want a new region. */
	if(!(newregion = region_new(vmp, startv, length, flags, memtype))) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}

	/* If a new event is specified, invoke it. */
	if(newregion->def_memtype->ev_new) {
		if(newregion->def_memtype->ev_new(newregion) != OK) {
			/* ev_new will have freed and removed the region */
			return NULL;
		}
	}

	if(mapflags & MF_PREALLOC) {
		if(map_handle_memory(vmp, newregion, 0, length, 1,
			NULL, 0, 0) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			map_free(newregion);
			return NULL;
		}
	}

	/* Pre-allocations should be uninitialized, but after that it's a
	 * different story.
	 */
	USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);

	/* Link it. */
	region_insert(&vmp->vm_regions_avl, newregion);

#if SANITYCHECKS
	assert(startv == newregion->vaddr);
	{
		struct vir_region *nextvr;
		if((nextvr = getnextvr(newregion))) {
			assert(newregion->vaddr < nextvr->vaddr);
		}
	}
#endif

	SANITYCHECK(SCL_FUNCTIONS);

	return newregion;
}
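
/*
 * Editor's sketch (hypothetical helper, not part of the original source):
 * a typical anonymous mapping through map_page_region(), placed anywhere
 * below VM_DATATOP, writable, and prefaulted via MF_PREALLOC so no page
 * faults occur on first use.
 */
#if 0
static struct vir_region *example_map_anon(struct vmproc *vmp, vir_bytes len)
{
	len = roundup(len, VM_PAGE_SIZE);
	return map_page_region(vmp, VM_PAGE_SIZE, VM_DATATOP, len,
		VR_ANON | VR_WRITABLE, MF_PREALLOC, &mem_type_anon);
}
#endif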

/*===========================================================================*
 *				map_subfree				     *
 *===========================================================================*/
static int map_subfree(struct vir_region *region,
	vir_bytes start, vir_bytes len)
{
	struct phys_region *pr;
	vir_bytes end = start+len;
	vir_bytes voffset;

#if SANITYCHECKS
	SLABSANE(region);
	/* Walk every page; the bound must be the region length in bytes,
	 * since voffset advances in page-sized byte steps.
	 */
	for(voffset = 0; voffset < region->length;
		voffset += VM_PAGE_SIZE) {
		struct phys_region *others;
		struct phys_block *pb;

		if(!(pr = physblock_get(region, voffset)))
			continue;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
	}
#endif

	for(voffset = start; voffset < end; voffset+=VM_PAGE_SIZE) {
		if(!(pr = physblock_get(region, voffset)))
			continue;
		assert(pr->offset >= start);
		assert(pr->offset < end);
		pb_unreferenced(region, pr, 1);
		SLABFREE(pr);
	}

	return OK;
}

/*===========================================================================*
 *				map_free				     *
 *===========================================================================*/
int map_free(struct vir_region *region)
{
	int r;

	if((r=map_subfree(region, 0, region->length)) != OK) {
		printf("%d\n", __LINE__);
		return r;
	}

	if(region->def_memtype->ev_delete)
		region->def_memtype->ev_delete(region);
	free(region->physblocks);
	region->physblocks = NULL;
	SLABFREE(region);

	return OK;
}

/*========================================================================*
 *				map_free_proc				  *
 *========================================================================*/
int map_free_proc(struct vmproc *vmp)
{
	struct vir_region *r;

	while((r = region_search_root(&vmp->vm_regions_avl))) {
		SANITYCHECK(SCL_DETAIL);
#if SANITYCHECKS
		nocheck++;
#endif
		region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
		map_free(r);
#if SANITYCHECKS
		nocheck--;
#endif
		SANITYCHECK(SCL_DETAIL);
	}

	region_init(&vmp->vm_regions_avl);

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}

/*===========================================================================*
 *				map_lookup				     *
 *===========================================================================*/
struct vir_region *map_lookup(struct vmproc *vmp,
	vir_bytes offset, struct phys_region **physr)
{
	struct vir_region *r;

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(!region_search_root(&vmp->vm_regions_avl))
		panic("process has no regions: %d", vmp->vm_endpoint);
#endif

	if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
		vir_bytes ph;
		if(offset >= r->vaddr && offset < r->vaddr + r->length) {
			ph = offset - r->vaddr;
			if(physr) {
				*physr = physblock_get(r, ph);
				if(*physr) assert((*physr)->offset == ph);
			}
			return r;
		}
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return NULL;
}

u32_t vrallocflags(u32_t flags)
{
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)
		allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)
		allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)
		allocflags |= PAF_LOWER1MB;
	if(!(flags & VR_UNINITIALIZED))
		allocflags |= PAF_CLEAR;

	return allocflags;
}
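
/*
 * Editor's note: vrallocflags() translates region (VR_*) flags into
 * allocation (PAF_*) flags. For example, VR_LOWER1MB | VR_UNINITIALIZED
 * yields just PAF_LOWER1MB: PAF_CLEAR is added only for regions that are
 * *not* marked uninitialized, so only those get zeroed pages.
 */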

/*===========================================================================*
 *				map_pf					     *
 *===========================================================================*/
int map_pf(struct vmproc *vmp,
	struct vir_region *region,
	vir_bytes offset,
	int write,
	vfs_callback_t pf_callback,
	void *state,
	int len,
	int *io)
{
	struct phys_region *ph;
	int r = OK;

	offset -= offset % VM_PAGE_SIZE;

	/* assert(offset >= 0); */ /* always true */
	assert(offset < region->length);

	assert(!(region->vaddr % VM_PAGE_SIZE));
	assert(!(write && !(region->flags & VR_WRITABLE)));

	SANITYCHECK(SCL_FUNCTIONS);

	if(!(ph = physblock_get(region, offset))) {
		struct phys_block *pb;

		/* New block. */

		if(!(pb = pb_new(MAP_NONE))) {
			printf("map_pf: pb_new failed\n");
			return ENOMEM;
		}

		if(!(ph = pb_reference(pb, offset, region,
			region->def_memtype))) {
			printf("map_pf: pb_reference failed\n");
			pb_free(pb);
			return ENOMEM;
		}
	}

	assert(ph);
	assert(ph->ph);

	/* If we're writing and the block is already
	 * writable, nothing to do.
	 */

	assert(ph->memtype->writable);

	if(!write || !ph->memtype->writable(ph)) {
		assert(ph->memtype->ev_pagefault);
		assert(ph->ph);

		if((r = ph->memtype->ev_pagefault(vmp,
			region, ph, write, pf_callback, state, len, io)) == SUSPEND) {
			return SUSPEND;
		}

		if(r != OK) {
#if 0
			printf("map_pf: pagefault in %s failed\n", ph->memtype->name);
#endif
			if(ph)
				pb_unreferenced(region, ph, 1);
			return r;
		}

		assert(ph);
		assert(ph->ph);
		assert(ph->ph->phys != MAP_NONE);
	}

	assert(ph->ph);
	assert(ph->ph->phys != MAP_NONE);

	if((r = map_ph_writept(vmp, region, ph)) != OK) {
		printf("map_pf: writept failed\n");
		return r;
	}

	SANITYCHECK(SCL_FUNCTIONS);

#if SANITYCHECKS
	if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset,
		VM_PAGE_SIZE, write)) {
		panic("map_pf: pt_checkrange failed: %d", r);
	}
#endif

	return r;
}

int map_handle_memory(struct vmproc *vmp,
	struct vir_region *region, vir_bytes start_offset, vir_bytes length,
	int write, vfs_callback_t cb, void *state, int statelen)
{
	vir_bytes offset, lim;
	int r;
	int io = 0;

	assert(length > 0);
	lim = start_offset + length;
	assert(lim > start_offset);

	for(offset = start_offset; offset < lim; offset += VM_PAGE_SIZE)
		if((r = map_pf(vmp, region, offset, write,
			cb, state, statelen, &io)) != OK)
			return r;

	return OK;
}

/*===========================================================================*
 *				map_pin_memory				     *
 *===========================================================================*/
int map_pin_memory(struct vmproc *vmp)
{
	struct vir_region *vr;
	int r;
	region_iter iter;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	/* Scan all memory regions. */
	pt_assert(&vmp->vm_pt);
	while((vr = region_get_iter(&iter))) {
		/* Make sure region is mapped to physical memory and writable.*/
		r = map_handle_memory(vmp, vr, 0, vr->length, 1, NULL, 0, 0);
		if(r != OK) {
			panic("map_pin_memory: map_handle_memory failed: %d", r);
		}
		region_incr_iter(&iter);
	}
	pt_assert(&vmp->vm_pt);
	return OK;
}

/*===========================================================================*
 *				map_copy_region				     *
 *===========================================================================*/
struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
{
	/* map_copy_region creates a complete copy of the vir_region
	 * data structure, linking in the same phys_blocks directly,
	 * but all in limbo, i.e., the caller has to link the vir_region
	 * to a process. Therefore it doesn't increase the refcount in
	 * the phys_block; the caller has to do this once it's linked.
	 * The reason for this is to keep the sanity checks working
	 * within this function.
	 */
	struct vir_region *newvr;
	struct phys_region *ph;
	int r;
#if SANITYCHECKS
	unsigned int cr;
	cr = physregions(vr);
#endif
	vir_bytes p;

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags, vr->def_memtype)))
		return NULL;

	USE(newvr, newvr->parent = vmp;);

	if(vr->def_memtype->ev_copy && (r=vr->def_memtype->ev_copy(vr, newvr)) != OK) {
		map_free(newvr);
		printf("VM: memtype-specific copy failed (%d)\n", r);
		return NULL;
	}

	for(p = 0; p < phys_slot(vr->length); p++) {
		struct phys_region *newph;

		if(!(ph = physblock_get(vr, p*VM_PAGE_SIZE))) continue;
		newph = pb_reference(ph->ph, ph->offset, newvr,
			vr->def_memtype);

		if(!newph) { map_free(newvr); return NULL; }

		if(ph->memtype->ev_reference)
			ph->memtype->ev_reference(ph, newph);

#if SANITYCHECKS
		USE(newph, newph->written = 0;);
		assert(physregions(vr) == cr);
#endif
	}

#if SANITYCHECKS
	assert(physregions(vr) == physregions(newvr));
#endif

	return newvr;
}
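
/*
 * Editor's sketch (not part of the original source): the caller contract
 * of map_copy_region(), as exercised by map_proc_copy_range() below: copy
 * the region on behalf of the destination process, then link the copy
 * into that process's region tree.
 */
#if 0
static int example_copy_one(struct vmproc *dst, struct vir_region *vr)
{
	struct vir_region *newvr;

	if(!(newvr = map_copy_region(dst, vr)))
		return ENOMEM;
	region_insert(&dst->vm_regions_avl, newvr);
	return OK;
}
#endif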

/*===========================================================================*
 *				copy_abs2region				     *
 *===========================================================================*/
int copy_abs2region(phys_bytes absaddr, struct vir_region *destregion,
	phys_bytes offset, phys_bytes len)
{
	assert(destregion);
	assert(destregion->physblocks);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->physblocks);
		if(!(ph = physblock_get(destregion, offset))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
		assert(ph->offset <= offset);
		if(ph->offset+VM_PAGE_SIZE <= offset) {
			printf("VM: copy_abs2region: no phys region found (2).\n");
			return EFAULT;
		}
		suboffset = offset - ph->offset;
		assert(suboffset < VM_PAGE_SIZE);
		sublen = len;
		if(sublen > VM_PAGE_SIZE - suboffset)
			sublen = VM_PAGE_SIZE - suboffset;
		assert(suboffset + sublen <= VM_PAGE_SIZE);
		if(ph->ph->refcount != 1) {
			printf("VM: copy_abs2region: refcount not 1.\n");
			return EFAULT;
		}

		if(sys_abscopy(absaddr, ph->ph->phys + suboffset, sublen) != OK) {
			printf("VM: copy_abs2region: abscopy failed.\n");
			return EFAULT;
		}
		absaddr += sublen;
		offset += sublen;
		len -= sublen;
	}

	return OK;
}

/*=========================================================================*
 *				map_writept				   *
 *=========================================================================*/
int map_writept(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *ph;
	int r;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		vir_bytes p;
		for(p = 0; p < vr->length; p += VM_PAGE_SIZE) {
			if(!(ph = physblock_get(vr, p))) continue;

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
				return r;
			}
		}
		region_incr_iter(&v_iter);
	}

	return OK;
}

/*========================================================================*
 *				map_proc_copy				  *
 *========================================================================*/
int map_proc_copy(struct vmproc *dst, struct vmproc *src)
{
	/* Copy all the memory regions from the src process to the dst process. */
	region_init(&dst->vm_regions_avl);

	return map_proc_copy_range(dst, src, NULL, NULL);
}

/*========================================================================*
 *				map_proc_copy_range			  *
 *========================================================================*/
int map_proc_copy_range(struct vmproc *dst, struct vmproc *src,
	struct vir_region *start_src_vr, struct vir_region *end_src_vr)
{
	struct vir_region *vr;
	region_iter v_iter;

	if(!start_src_vr)
		start_src_vr = region_search_least(&src->vm_regions_avl);
	if(!end_src_vr)
		end_src_vr = region_search_greatest(&src->vm_regions_avl);

	assert(start_src_vr && end_src_vr);
	assert(start_src_vr->parent == src);
	region_start_iter(&src->vm_regions_avl, &v_iter,
		start_src_vr->vaddr, AVL_EQUAL);
	assert(region_get_iter(&v_iter) == start_src_vr);

	/* Copy source regions into the destination. */

	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		struct vir_region *newvr;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		region_insert(&dst->vm_regions_avl, newvr);
		assert(vr->length == newvr->length);

#if SANITYCHECKS
	{
		vir_bytes vaddr;
		struct phys_region *orig_ph, *new_ph;
		assert(vr->physblocks != newvr->physblocks);
		for(vaddr = 0; vaddr < vr->length; vaddr += VM_PAGE_SIZE) {
			orig_ph = physblock_get(vr, vaddr);
			new_ph = physblock_get(newvr, vaddr);
			if(!orig_ph) { assert(!new_ph); continue;}
			assert(new_ph);
			assert(orig_ph != new_ph);
			assert(orig_ph->ph == new_ph->ph);
		}
	}
#endif
		if(vr == end_src_vr) {
			break;
		}
		region_incr_iter(&v_iter);
	}

	map_writept(src);
	map_writept(dst);

	SANITYCHECK(SCL_FUNCTIONS);
	return OK;
}

int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v, limit, extralen;
	struct vir_region *vr, *nextvr;
	struct phys_region **newpr;
	int newslots, prevslots, addedslots, r;

	offset = roundup(offset, VM_PAGE_SIZE);

	if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
		printf("VM: nothing to extend\n");
		return ENOMEM;
	}

	if(vr->vaddr + vr->length >= v) return OK;

	limit = vr->vaddr + vr->length;

	assert(vr->vaddr <= offset);
	newslots = phys_slot(offset - vr->vaddr);
	prevslots = phys_slot(vr->length);
	assert(newslots >= prevslots);
	addedslots = newslots - prevslots;
	extralen = offset - limit;
	assert(extralen > 0);

	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}

	if(nextvr && nextvr->vaddr < offset) {
		printf("VM: can't grow into next region\n");
		return ENOMEM;
	}

	if(!vr->def_memtype->ev_resize) {
		if(!map_page_region(vmp, limit, 0, extralen,
			VR_WRITABLE | VR_ANON,
			0, &mem_type_anon)) {
			printf("resize: couldn't put anon memory there\n");
			return ENOMEM;
		}
		return OK;
	}

	if(!(newpr = realloc(vr->physblocks,
		newslots * sizeof(struct phys_region *)))) {
		printf("VM: map_region_extend_upto_v: realloc failed\n");
		return ENOMEM;
	}

	vr->physblocks = newpr;
	memset(vr->physblocks + prevslots, 0,
		addedslots * sizeof(struct phys_region *));

	r = vr->def_memtype->ev_resize(vmp, vr, offset - vr->vaddr);

	return r;
}

/*========================================================================*
 *				map_unmap_region			  *
 *========================================================================*/
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	vir_bytes offset, vir_bytes len)
{
	/* Shrink the region by 'len' bytes, from the start. Unreference
	 * memory it used to reference if any.
	 */
	vir_bytes regionstart;
	int freeslots = phys_slot(len);

	SANITYCHECK(SCL_FUNCTIONS);

	if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
		printf("VM: bogus length 0x%lx\n", len);
		return EINVAL;
	}

	regionstart = r->vaddr + offset;

	/* unreference its memory */
	map_subfree(r, offset, len);

	/* if unmap was at start/end of this region, it actually shrinks */
	if(r->length == len) {
		/* Whole region disappears. Unlink and free it. */
		region_remove(&vmp->vm_regions_avl, r->vaddr);
		map_free(r);
	} else if(offset == 0) {
		struct phys_region *pr;
		vir_bytes voffset;
		int remslots;

		if(!r->def_memtype->ev_lowshrink) {
			printf("VM: low-shrinking not implemented for %s\n",
				r->def_memtype->name);
			return EINVAL;
		}

		if(r->def_memtype->ev_lowshrink(r, len) != OK) {
			printf("VM: low-shrinking failed for %s\n",
				r->def_memtype->name);
			return EINVAL;
		}

		region_remove(&vmp->vm_regions_avl, r->vaddr);

		USE(r,
			r->vaddr += len;);

		remslots = phys_slot(r->length);

		region_insert(&vmp->vm_regions_avl, r);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		for(voffset = len; voffset < r->length;
			voffset += VM_PAGE_SIZE) {
			if(!(pr = physblock_get(r, voffset))) continue;
			assert(pr->offset >= offset);
			assert(pr->offset >= len);
			USE(pr, pr->offset -= len;);
		}
		if(remslots)
			memmove(r->physblocks, r->physblocks + freeslots,
				remslots * sizeof(struct phys_region *));
		USE(r, r->length -= len;);
	} else if(offset + len == r->length) {
		assert(len <= r->length);
		r->length -= len;
	}

	SANITYCHECK(SCL_DETAIL);

	if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
		MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
		printf("VM: map_unmap_region: pt_writemap failed\n");
		return ENOMEM;
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return OK;
}
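
/*
 * Editor's note on the low-shrink case above: when the first 'len' bytes
 * of a region are unmapped, the region's vaddr grows by 'len' while every
 * surviving phys_region keeps its absolute address, so each pr->offset
 * shrinks by 'len' and the physblocks array shifts down by phys_slot(len)
 * slots. Unmapping one page of a three-page region, schematically:
 *
 *	before:	vaddr = V,   slots [A][B][C]
 *	after:	vaddr = V+P, slots [B][C]	(P = VM_PAGE_SIZE)
 */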

static int split_region(struct vmproc *vmp, struct vir_region *vr,
	struct vir_region **vr1, struct vir_region **vr2, vir_bytes split_len)
{
	struct vir_region *r1 = NULL, *r2 = NULL;
	vir_bytes rem_len = vr->length - split_len;
	int slots1, slots2;
	vir_bytes voffset;
	int n1 = 0, n2 = 0;

	assert(!(split_len % VM_PAGE_SIZE));
	assert(!(rem_len % VM_PAGE_SIZE));
	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(vr->length % VM_PAGE_SIZE));

	if(!vr->def_memtype->ev_split) {
		printf("VM: split region not implemented for %s\n",
			vr->def_memtype->name);
		sys_diagctl_stacktrace(vmp->vm_endpoint);
		return EINVAL;
	}

	slots1 = phys_slot(split_len);
	slots2 = phys_slot(rem_len);

	if(!(r1 = region_new(vmp, vr->vaddr, split_len, vr->flags,
		vr->def_memtype))) {
		goto bail;
	}

	if(!(r2 = region_new(vmp, vr->vaddr+split_len, rem_len, vr->flags,
		vr->def_memtype))) {
		/* The bail path frees r1; freeing it here as well would
		 * be a double free.
		 */
		goto bail;
	}

	for(voffset = 0; voffset < r1->length; voffset += VM_PAGE_SIZE) {
		struct phys_region *ph, *phn;
		if(!(ph = physblock_get(vr, voffset))) continue;
		if(!(phn = pb_reference(ph->ph, voffset, r1, ph->memtype)))
			goto bail;
		n1++;
	}

	for(voffset = 0; voffset < r2->length; voffset += VM_PAGE_SIZE) {
		struct phys_region *ph, *phn;
		if(!(ph = physblock_get(vr, split_len + voffset))) continue;
		if(!(phn = pb_reference(ph->ph, voffset, r2, ph->memtype)))
			goto bail;
		n2++;
	}

	vr->def_memtype->ev_split(vmp, vr, r1, r2);

	region_remove(&vmp->vm_regions_avl, vr->vaddr);
	map_free(vr);
	region_insert(&vmp->vm_regions_avl, r1);
	region_insert(&vmp->vm_regions_avl, r2);

	*vr1 = r1;
	*vr2 = r2;

	return OK;

bail:
	if(r1) map_free(r1);
	if(r2) map_free(r2);

	printf("split_region: failed\n");

	return ENOMEM;
}
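
/*
 * Editor's note: split_region() turns one region into two adjacent
 * regions at a page-aligned split point, re-referencing each phys_block
 * into whichever half it falls in:
 *
 *	before:	[ vaddr ...................... vaddr+length )
 *	after:	[ vaddr .. vaddr+split_len ) [ vaddr+split_len .. )
 *
 * map_unmap_range() below relies on this to punch a hole in the middle
 * of a region before shrinking the resulting pieces.
 */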

int map_unmap_range(struct vmproc *vmp, vir_bytes unmap_start, vir_bytes length)
{
	vir_bytes o = unmap_start % VM_PAGE_SIZE, unmap_limit;
	region_iter v_iter;
	struct vir_region *vr, *nextvr;

	unmap_start -= o;
	length += o;
	length = roundup(length, VM_PAGE_SIZE);
	unmap_limit = length + unmap_start;

	if(length < VM_PAGE_SIZE) return EINVAL;
	if(unmap_limit <= unmap_start) return EINVAL;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, unmap_start, AVL_LESS_EQUAL);

	if(!(vr = region_get_iter(&v_iter))) {
		region_start_iter(&vmp->vm_regions_avl, &v_iter, unmap_start, AVL_GREATER);
		if(!(vr = region_get_iter(&v_iter))) {
			return OK;
		}
	}

	assert(vr);

	for(; vr && vr->vaddr < unmap_limit; vr = nextvr) {
		vir_bytes thislimit = vr->vaddr + vr->length;
		vir_bytes this_unmap_start, this_unmap_limit;
		vir_bytes remainlen;
		int r;

		region_incr_iter(&v_iter);
		nextvr = region_get_iter(&v_iter);

		assert(thislimit > vr->vaddr);

		this_unmap_start = MAX(unmap_start, vr->vaddr);
		this_unmap_limit = MIN(unmap_limit, thislimit);

		if(this_unmap_start >= this_unmap_limit) continue;

		if(this_unmap_start > vr->vaddr && this_unmap_limit < thislimit) {
			struct vir_region *vr1, *vr2;
			vir_bytes split_len = this_unmap_limit - vr->vaddr;
			assert(split_len > 0);
			assert(split_len < vr->length);
			if((r=split_region(vmp, vr, &vr1, &vr2, split_len)) != OK) {
				printf("VM: unmap split failed\n");
				return r;
			}
			vr = vr1;
			thislimit = vr->vaddr + vr->length;
		}

		remainlen = this_unmap_limit - vr->vaddr;

		assert(this_unmap_start >= vr->vaddr);
		assert(this_unmap_limit <= thislimit);
		assert(remainlen > 0);

		r = map_unmap_region(vmp, vr, this_unmap_start - vr->vaddr,
			this_unmap_limit - this_unmap_start);

		if(r != OK) {
			printf("map_unmap_range: map_unmap_region failed\n");
			return r;
		}

		if(nextvr) {
			region_start_iter(&vmp->vm_regions_avl, &v_iter, nextvr->vaddr, AVL_EQUAL);
			assert(region_get_iter(&v_iter) == nextvr);
		}
	}

	return OK;
}
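
/*
 * Editor's sketch (hypothetical helper, not part of the original source):
 * an munmap-style request. map_unmap_range() rounds the range outward to
 * page boundaries, frees regions entirely inside it, shrinks regions that
 * straddle an edge, and splits a region that contains the whole range.
 */
#if 0
static int example_punch_hole(struct vmproc *vmp, struct vir_region *vr)
{
	/* Unmap one page in the middle of vr: the region is split in
	 * two and the first half is then shrunk from its end.
	 */
	assert(vr->length >= 3*VM_PAGE_SIZE);
	return map_unmap_range(vmp, vr->vaddr + VM_PAGE_SIZE, VM_PAGE_SIZE);
}
#endif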

/*========================================================================*
 *				map_region_lookup_type			  *
 *========================================================================*/
struct vir_region* map_region_lookup_type(struct vmproc *vmp, u32_t type)
{
	struct vir_region *vr;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		region_incr_iter(&v_iter);
		if(vr->flags & type)
			return vr;
	}

	return NULL;
}

/*========================================================================*
 *				map_get_phys				  *
 *========================================================================*/
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
{
	struct vir_region *vr;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr))
		return EINVAL;

	if (!vr->def_memtype->regionid)
		return EINVAL;

	if(r)
		*r = vr->def_memtype->regionid(vr);

	return OK;
}

/*========================================================================*
 *				map_get_ref				  *
 *========================================================================*/
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
{
	struct vir_region *vr;

	if (!(vr = map_lookup(vmp, addr, NULL)) ||
		(vr->vaddr != addr) || !vr->def_memtype->refcount)
		return EINVAL;

	if (cnt)
		*cnt = vr->def_memtype->refcount(vr);

	return OK;
}

void get_usage_info_kernel(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.kernel_allocated_bytes +
		kernel_boot_info.kernel_allocated_bytes_dynamic;
	/* All of the kernel's pages are actually mapped in. */
	vui->vui_virtual = vui->vui_mvirtual = vui->vui_total;
}

static void get_usage_info_vm(struct vm_usage_info *vui)
{
	memset(vui, 0, sizeof(*vui));
	vui->vui_total = kernel_boot_info.vm_allocated_bytes +
		get_vm_self_pages() * VM_PAGE_SIZE;
	/* All of VM's pages are actually mapped in. */
	vui->vui_virtual = vui->vui_mvirtual = vui->vui_total;
}
/*
 * Return whether the given region is the associated process's stack.
 * Unfortunately, we do not actually have this information: in most cases,
 * VM is not responsible for setting up the stack in the first place.
 * Fortunately, this is only for statistical purposes, so we can get away
 * with guesswork. However, it is certainly not accurate in light of
 * userspace thread stacks, or if the process is messing with its stack in
 * any way, or if (currently) VFS decides to put the stack elsewhere,
 * etcetera.
 */
static int
is_stack_region(struct vir_region * vr)
{

	return (vr->vaddr == VM_STACKTOP - DEFAULT_STACK_LIMIT &&
		vr->length == DEFAULT_STACK_LIMIT);
}

/*========================================================================*
 *				get_usage_info				  *
 *========================================================================*/
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	struct phys_region *ph;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
	vir_bytes voffset;

	memset(vui, 0, sizeof(*vui));

	if(vmp->vm_endpoint == VM_PROC_NR) {
		get_usage_info_vm(vui);
		return;
	}

	if(vmp->vm_endpoint < 0) {
		get_usage_info_kernel(vui);
		return;
	}

	while((vr = region_get_iter(&v_iter))) {
		vui->vui_virtual += vr->length;
		vui->vui_mvirtual += vr->length;
		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
			if(!(ph = physblock_get(vr, voffset))) {
				/* mvirtual: discount unmapped stack pages. */
				if (is_stack_region(vr))
					vui->vui_mvirtual -= VM_PAGE_SIZE;
				continue;
			}
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

			if (ph->ph->refcount > 1) {
				/* Any page with a refcount > 1 is common. */
				vui->vui_common += VM_PAGE_SIZE;

				/* Any common, non-COW page is shared. */
				if (vr->flags & VR_SHARED)
					vui->vui_shared += VM_PAGE_SIZE;
			}
		}
		region_incr_iter(&v_iter);
	}

	/*
	 * Also include getrusage resource information, so that the MIB service
	 * need not make more than one call to VM for each process entry.
	 */
	vui->vui_maxrss = vmp->vm_total_max / 1024L;
	vui->vui_minflt = vmp->vm_minor_page_fault;
	vui->vui_majflt = vmp->vm_major_page_fault;
}

/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
	struct vir_region *vr;
	vir_bytes next;
	int count;
	region_iter v_iter;

	next = *nextp;

	if (!max) return 0;

	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max;
		region_incr_iter(&v_iter)) {
		struct phys_region *ph1 = NULL, *ph2 = NULL;
		vir_bytes voffset;

		/* where to start on next iteration, regardless of what we find now */
		next = vr->vaddr + vr->length;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
			struct phys_region *ph;
			if(!(ph = physblock_get(vr, voffset))) continue;
			if(!ph1) ph1 = ph;
			ph2 = ph;
		}

		if(!ph1 || !ph2) {
			printf("skipping empty region 0x%lx-0x%lx\n",
				vr->vaddr, vr->vaddr+vr->length);
			continue;
		}

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = PROT_READ;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (vr->flags & VR_WRITABLE)
			vri->vri_prot |= PROT_WRITE;
		count++;
		vri++;
	}

	*nextp = next;
	return count;
}
/*========================================================================*
 *				printregionstats			  *
 *========================================================================*/
void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	vir_bytes used = 0, weighted = 0;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		vir_bytes voffset;
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		for(voffset = 0; voffset < vr->length; voffset+=VM_PAGE_SIZE) {
			if(!(pr = physblock_get(vr, voffset))) continue;
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
	}

	printf("%6lukB %6lukB\n", used/1024, weighted/1024);

	return;
}

void map_setparent(struct vmproc *vmp)
{
	region_iter iter;
	struct vir_region *vr;
	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		USE(vr, vr->parent = vmp;);
		region_incr_iter(&iter);
	}
}

unsigned int physregions(struct vir_region *vr)
{
	unsigned int n = 0;
	vir_bytes voffset;
	for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
		if(physblock_get(vr, voffset))
			n++;
	}
	return n;
}