/* This file contains some utility routines for VM. */

#define _SYSTEM		1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/type.h>
#include <minix/bitmap.h>
#include <minix/rs.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/resource.h>

#include "proto.h"
#include "glo.h"
#include "util.h"
#include "region.h"
#include "sanitycheck.h"

#include <machine/archtypes.h>
#include "kernel/const.h"
#include "kernel/config.h"
#include "kernel/type.h"
#include "kernel/proc.h"

/*===========================================================================*
 *				get_mem_chunks				     *
 *===========================================================================*/
void get_mem_chunks(
	struct memory *mem_chunks)	/* store mem chunks here */
{
/* Initialize the free memory list from the kernel-provided memory map.
 * Translate the byte offsets and sizes in this list to clicks, properly
 * truncated.
 */
  phys_bytes base, size, limit;
  int i;
  struct memory *memp;

  /* Initialize everything to zero. */
  memset(mem_chunks, 0, NR_MEMS*sizeof(*mem_chunks));

  /* Obtain and parse memory from kernel environment. */
  /* XXX Any memory chunk in excess of NR_MEMS is silently ignored. */
  for(i = 0; i < MIN(MAXMEMMAP, NR_MEMS); i++) {
	mem_chunks[i].base = kernel_boot_info.memmap[i].mm_base_addr;
	mem_chunks[i].size = kernel_boot_info.memmap[i].mm_length;
  }

  /* Round physical memory to clicks. Round start up, round end down. */
  for (i = 0; i < NR_MEMS; i++) {
	memp = &mem_chunks[i];		/* next mem chunk is stored here */
	base = mem_chunks[i].base;
	size = mem_chunks[i].size;
	limit = base + size;
	base = (phys_bytes) (CLICK_CEIL(base));
	limit = (phys_bytes) (CLICK_FLOOR(limit));
	if (limit <= base) {
		memp->base = memp->size = 0;
	} else {
		memp->base = base >> CLICK_SHIFT;
		memp->size = (limit - base) >> CLICK_SHIFT;
	}
  }
}
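
/* Illustration of the rounding above, assuming 4 KiB clicks (CLICK_SHIFT of
 * 12; the click size is architecture-dependent): a boot map entry with base
 * 0x1234 and length 0x4444 spans [0x1234, 0x5678).  CLICK_CEIL rounds the
 * base up to 0x2000 and CLICK_FLOOR rounds the limit down to 0x5000, so the
 * chunk is recorded as base click 2 with a size of 3 clicks; partial clicks
 * at either end are discarded.
 */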

/*===========================================================================*
 *				vm_isokendpt				     *
 *===========================================================================*/
int vm_isokendpt(endpoint_t endpoint, int *procn)
{
	*procn = _ENDPOINT_P(endpoint);
	if(*procn < 0 || *procn >= NR_PROCS)
		return EINVAL;
	if(*procn >= 0 && endpoint != vmproc[*procn].vm_endpoint)
		return EDEADEPT;
	if(*procn >= 0 && !(vmproc[*procn].vm_flags & VMF_INUSE))
		return EDEADEPT;
	return OK;
}
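
/* Typical usage, as in do_info() below: translate a caller endpoint into a
 * slot index before touching the vmproc table (sketch; 'm' is an incoming
 * message).
 *
 *	int slot;
 *	if (vm_isokendpt(m->m_source, &slot) != OK)
 *		return EINVAL;
 *	vmp = &vmproc[slot];
 */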


/*===========================================================================*
 *				do_info					     *
 *===========================================================================*/
int do_info(message *m)
{
	struct vm_stats_info vsi;
	struct vm_usage_info vui;
	static struct vm_region_info vri[MAX_VRI_COUNT];
	struct vmproc *vmp;
	vir_bytes addr, size, next, ptr;
	int r, pr, dummy, count, free_pages, largest_contig;

	if (vm_isokendpt(m->m_source, &pr) != OK)
		return EINVAL;
	vmp = &vmproc[pr];

	ptr = (vir_bytes) m->m_lsys_vm_info.ptr;

	switch(m->m_lsys_vm_info.what) {
	case VMIW_STATS:
		vsi.vsi_pagesize = VM_PAGE_SIZE;
		vsi.vsi_total = total_pages;
		memstats(&dummy, &free_pages, &largest_contig);
		vsi.vsi_free = free_pages;
		vsi.vsi_largest = largest_contig;

		get_stats_info(&vsi);

		addr = (vir_bytes) &vsi;
		size = sizeof(vsi);

		break;

	case VMIW_USAGE:
		if(m->m_lsys_vm_info.ep < 0)
			get_usage_info_kernel(&vui);
		else if (vm_isokendpt(m->m_lsys_vm_info.ep, &pr) != OK)
			return EINVAL;
		else get_usage_info(&vmproc[pr], &vui);

		addr = (vir_bytes) &vui;
		size = sizeof(vui);

		break;

	case VMIW_REGION:
		if(m->m_lsys_vm_info.ep == SELF) {
			m->m_lsys_vm_info.ep = m->m_source;
		}
		if (vm_isokendpt(m->m_lsys_vm_info.ep, &pr) != OK)
			return EINVAL;

		count = MIN(m->m_lsys_vm_info.count, MAX_VRI_COUNT);
		next = m->m_lsys_vm_info.next;

		count = get_region_info(&vmproc[pr], vri, count, &next);

		m->m_lsys_vm_info.count = count;
		m->m_lsys_vm_info.next = next;

		addr = (vir_bytes) vri;
		size = sizeof(vri[0]) * count;

		break;

	default:
		return EINVAL;
	}

	if (size == 0)
		return OK;

	/* Make sure that no page faults can occur while copying out. A page
	 * fault would cause the kernel to send a notify to us, while we would
	 * be waiting for the result of the copy system call, resulting in a
	 * deadlock. Note that no memory mapping can be undone without the
	 * involvement of VM, so we are safe until we're done.
	 */
	r = handle_memory_once(vmp, ptr, size, 1 /*wrflag*/);
	if (r != OK) return r;

	/* Now that we know the copy out will succeed, perform the actual copy
	 * operation.
	 */
	return sys_datacopy(SELF, addr,
		(vir_bytes) vmp->vm_endpoint, ptr, size);
}
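
/* Sketch of how the VMIW_STATS request handled above is filled in on the
 * caller's side.  Callers normally go through a libsys wrapper rather than
 * building the message by hand; the direct form below is illustrative only,
 * and the exact delivery mechanism (a sendrec to VM) is assumed.
 *
 *	message m;
 *	struct vm_stats_info vsi;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.m_lsys_vm_info.what = VMIW_STATS;
 *	m.m_lsys_vm_info.ptr = &vsi;
 *	(send the message to VM and wait for the reply)
 */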

/*===========================================================================*
 *				swap_proc_slot				     *
 *===========================================================================*/
int swap_proc_slot(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
	struct vmproc orig_src_vmproc, orig_dst_vmproc;

#if LU_DEBUG
	printf("VM: swap_proc: swapping %d (%d) and %d (%d)\n",
		src_vmp->vm_endpoint, src_vmp->vm_slot,
		dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	/* Save existing data. */
	orig_src_vmproc = *src_vmp;
	orig_dst_vmproc = *dst_vmp;

	/* Swap slots. */
	*src_vmp = orig_dst_vmproc;
	*dst_vmp = orig_src_vmproc;

	/* Preserve endpoints and slot numbers. */
	src_vmp->vm_endpoint = orig_src_vmproc.vm_endpoint;
	src_vmp->vm_slot = orig_src_vmproc.vm_slot;
	dst_vmp->vm_endpoint = orig_dst_vmproc.vm_endpoint;
	dst_vmp->vm_slot = orig_dst_vmproc.vm_slot;

#if LU_DEBUG
	printf("VM: swap_proc: swapped %d (%d) and %d (%d)\n",
		src_vmp->vm_endpoint, src_vmp->vm_slot,
		dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	return OK;
}
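
/* Note on the function above: the two vmproc entries exchange all of their
 * contents, and then each entry gets back its own endpoint and slot number,
 * so the mapping from endpoint to slot in the vmproc table is left unchanged
 * while everything else (page table, region tree, flags) is swapped.
 */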
220433d6423SLionel Sambuc
22195cb9397SDavid van Moolenbroek /*
22295cb9397SDavid van Moolenbroek * Transfer memory mapped regions, using CoW sharing, from 'src_vmp' to
22395cb9397SDavid van Moolenbroek * 'dst_vmp', for the source process's address range of 'start_addr'
22495cb9397SDavid van Moolenbroek * (inclusive) to 'end_addr' (exclusive). Return OK or an error code.
225abf8a7e7SDavid van Moolenbroek * If the regions seem to have been transferred already, do nothing.
22695cb9397SDavid van Moolenbroek */
22795cb9397SDavid van Moolenbroek static int
transfer_mmap_regions(struct vmproc * src_vmp,struct vmproc * dst_vmp,vir_bytes start_addr,vir_bytes end_addr)228abf8a7e7SDavid van Moolenbroek transfer_mmap_regions(struct vmproc *src_vmp, struct vmproc *dst_vmp,
22995cb9397SDavid van Moolenbroek vir_bytes start_addr, vir_bytes end_addr)
23095cb9397SDavid van Moolenbroek {
231abf8a7e7SDavid van Moolenbroek struct vir_region *start_vr, *check_vr, *end_vr;
23295cb9397SDavid van Moolenbroek
23395cb9397SDavid van Moolenbroek start_vr = region_search(&src_vmp->vm_regions_avl, start_addr,
23495cb9397SDavid van Moolenbroek AVL_GREATER_EQUAL);
23595cb9397SDavid van Moolenbroek
23695cb9397SDavid van Moolenbroek if (start_vr == NULL || start_vr->vaddr >= end_addr)
23795cb9397SDavid van Moolenbroek return OK; /* nothing to do */
23895cb9397SDavid van Moolenbroek
	/* In the case of a multicomponent live update that includes VM, this
	 * function may be called for the same process more than once, so as
	 * to keep the various code paths as similar as possible while still
	 * ensuring that the regions are copied early enough.
	 *
	 * To compensate for these multiple calls, we perform a very simple
	 * check here to see if the region to transfer is already present in
	 * the target process.  If so, we can safely skip copying the regions
	 * again, because there is no other possible explanation for the
	 * region being present already.  Things would go horribly wrong if we
	 * tried copying anyway, but this check is not good enough to detect
	 * all such problems, since we check the base address only.
	 */
	check_vr = region_search(&dst_vmp->vm_regions_avl, start_vr->vaddr,
	    AVL_EQUAL);
	if (check_vr != NULL) {
#if LU_DEBUG
		printf("VM: transfer_mmap_regions: skipping transfer from "
		    "%d to %d (0x%lx already present)\n",
		    src_vmp->vm_endpoint, dst_vmp->vm_endpoint,
		    start_vr->vaddr);
#endif
		return OK;
	}

	end_vr = region_search(&src_vmp->vm_regions_avl, end_addr, AVL_LESS);
	assert(end_vr != NULL);
	assert(start_vr->vaddr <= end_vr->vaddr);

#if LU_DEBUG
	printf("VM: transfer_mmap_regions: transferring memory mapped regions "
	    "from %d to %d (0x%lx to 0x%lx)\n", src_vmp->vm_endpoint,
	    dst_vmp->vm_endpoint, start_vr->vaddr, end_vr->vaddr);
#endif

	return map_proc_copy_range(dst_vmp, src_vmp, start_vr, end_vr);
}

/*
 * Create copy-on-write mappings in process 'dst_vmp' for all memory-mapped
 * regions present in 'src_vmp'.  Return OK on success, or an error otherwise.
 * In the case of failure, successfully created mappings are not undone.
 */
int
map_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
	int r;

#if LU_DEBUG
	printf("VM: mapping dynamic data from %d to %d\n",
	    src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif

	/* Transfer memory mapped regions now.  To sandbox the new instance
	 * and prevent state corruption on rollback, we share all the regions
	 * between the two instances as COW.
	 */
	r = transfer_mmap_regions(src_vmp, dst_vmp, VM_MMAPBASE, VM_MMAPTOP);

	/* If the stack is not mapped at VM_DATATOP, there might be some more
	 * regions hiding above the stack.  We have to transfer those as well.
	 */
	if (r == OK && VM_STACKTOP < VM_DATATOP)
		r = transfer_mmap_regions(src_vmp, dst_vmp, VM_STACKTOP,
		    VM_DATATOP);

	return r;
}
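
/* map_proc_dyn_data() above covers two ranges: the regular mmap area from
 * VM_MMAPBASE to VM_MMAPTOP and, when the stack does not reach VM_DATATOP,
 * the area above the stack as well.  It is also called from
 * swap_proc_dyn_data() below, with source and destination deliberately
 * swapped (see the comment there).
 */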

/*===========================================================================*
 *			      swap_proc_dyn_data			     *
 *===========================================================================*/
int swap_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp,
	int sys_upd_flags)
{
	int is_vm;
	int r;

	is_vm = (dst_vmp->vm_endpoint == VM_PROC_NR);

	/* For VM, transfer memory mapped regions first. */
	if(is_vm) {
#if LU_DEBUG
		printf("VM: swap_proc_dyn_data: transferring memory mapped regions from old (%d) to new VM (%d)\n",
			src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif
		r = pt_map_in_range(src_vmp, dst_vmp, VM_OWN_HEAPBASE, VM_OWN_MMAPTOP);
		if(r != OK) {
			printf("swap_proc_dyn_data: pt_map_in_range failed\n");
			return r;
		}
		r = pt_map_in_range(src_vmp, dst_vmp, VM_STACKTOP, VM_DATATOP);
		if(r != OK) {
			printf("swap_proc_dyn_data: pt_map_in_range failed\n");
			return r;
		}

	}

#if LU_DEBUG
	printf("VM: swap_proc_dyn_data: swapping regions' parents for %d (%d) and %d (%d)\n",
		src_vmp->vm_endpoint, src_vmp->vm_slot,
		dst_vmp->vm_endpoint, dst_vmp->vm_slot);
#endif

	/* Swap vir_regions' parents. */
	map_setparent(src_vmp);
	map_setparent(dst_vmp);

	/* Don't transfer mmapped regions if not required. */
	if(is_vm || (sys_upd_flags & (SF_VM_ROLLBACK|SF_VM_NOMMAP))) {
		return OK;
	}

	/* Make sure regions are consistent. */
	assert(region_search_root(&src_vmp->vm_regions_avl) &&
	    region_search_root(&dst_vmp->vm_regions_avl));

	/* Source and destination are intentionally swapped here! */
	return map_proc_dyn_data(dst_vmp, src_vmp);
}

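/* The definitions below appear to replace the usual libc entry points inside
 * the VM server itself (an assumption based on their presence in this file):
 * mmap() hands out zeroed slab pages via vm_allocpages(), munmap() returns
 * them via vm_freepages(), and _brk() grows VM's own heap by mapping fresh
 * pages directly into its page table.
 */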
void *mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
{
	void *ret;
	phys_bytes p;

	assert(!addr);
	assert(!(len % VM_PAGE_SIZE));

	ret = vm_allocpages(&p, VMP_SLAB, len/VM_PAGE_SIZE);

	if(!ret) return MAP_FAILED;
	memset(ret, 0, len);
	return ret;
}

int munmap(void * addr, size_t len)
{
	vm_freepages((vir_bytes) addr, roundup(len, VM_PAGE_SIZE)/VM_PAGE_SIZE);
	return 0;
}

#ifdef __weak_alias
__weak_alias(brk, _brk)
#endif
int _brk(void *addr)
{
/* brk is a special case function to allow VM itself to
   allocate memory in its own (cacheable) HEAP. */
	vir_bytes target = roundup((vir_bytes)addr, VM_PAGE_SIZE), v;
	extern char _end;
	extern char *_brksize;
	static vir_bytes prevbrk = (vir_bytes) &_end;
	struct vmproc *vmprocess = &vmproc[VM_PROC_NR];

	for(v = roundup(prevbrk, VM_PAGE_SIZE); v < target;
		v += VM_PAGE_SIZE) {
		phys_bytes mem, newpage = alloc_mem(1, 0);
		if(newpage == NO_MEM) return -1;
		mem = CLICK2ABS(newpage);
		if(pt_writemap(vmprocess, &vmprocess->vm_pt,
			v, mem, VM_PAGE_SIZE,
			ARCH_VM_PTE_PRESENT
			| ARCH_VM_PTE_USER
			| ARCH_VM_PTE_RW
#if defined(__arm__)
			| ARM_VM_PTE_CACHED
#endif
			, 0) != OK) {
			free_mem(newpage, 1);
			return -1;
		}
		prevbrk = v + VM_PAGE_SIZE;
	}

	_brksize = (char *) addr;

	if(sys_vmctl(SELF, VMCTL_FLUSHTLB, 0) != OK)
		panic("flushtlb failed");

	return 0;
}
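
/* Rough sketch of what _brk() above does for a concrete request, assuming
 * 4 KiB pages: if prevbrk is 0x806010 and addr is 0x808800, then target is
 * 0x809000 and the loop maps new pages at 0x807000 and 0x808000 only, after
 * which _brksize records the exact, unrounded break address 0x808800.
 */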

/*===========================================================================*
 *				do_getrusage				     *
 *===========================================================================*/
int do_getrusage(message *m)
{
	int res, slot;
	struct vmproc *vmp;
	struct rusage r_usage;

	/* If the request is not from PM, it is coming directly from userland.
	 * This is an obsolete construction.  In the future, userland programs
	 * should no longer be allowed to call vm_getrusage(2) directly at all.
	 * For backward compatibility, we simply return success for now.
	 */
	if (m->m_source != PM_PROC_NR)
		return OK;

	/* Get the process for which resource usage is requested. */
	if ((res = vm_isokendpt(m->m_lsys_vm_rusage.endpt, &slot)) != OK)
		return ESRCH;

	vmp = &vmproc[slot];

	/* We are going to change only a few fields, so copy in the rusage
	 * structure first.  The structure is still in PM's address space at
	 * this point, so use the message source.
	 */
	if ((res = sys_datacopy(m->m_source, m->m_lsys_vm_rusage.addr,
	    SELF, (vir_bytes) &r_usage, (vir_bytes) sizeof(r_usage))) < 0)
		return res;

	if (!m->m_lsys_vm_rusage.children) {
		r_usage.ru_maxrss = vmp->vm_total_max / 1024L; /* unit is KB */
		r_usage.ru_minflt = vmp->vm_minor_page_fault;
		r_usage.ru_majflt = vmp->vm_major_page_fault;
	} else {
		/* XXX TODO: return the fields for terminated, waited-for
		 * children of the given process.  We currently do not have
		 * this information!  In the future, rather than teaching VM
		 * about the process hierarchy, PM should probably tell VM at
		 * process exit time which other process should inherit its
		 * resource usage fields.  For now, we assume PM clears the
		 * fields before making this call, so we don't zero the fields
		 * explicitly.
		 */
	}

	/* Copy out the resulting structure back to PM. */
	return sys_datacopy(SELF, (vir_bytes) &r_usage, m->m_source,
	    m->m_lsys_vm_rusage.addr, (vir_bytes) sizeof(r_usage));
}

/*===========================================================================*
 *			      adjust_proc_refs				     *
 *===========================================================================*/
void adjust_proc_refs()
{
	struct vmproc *vmp;
	region_iter iter;

	/* Fix up region parents. */
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {
		struct vir_region *vr;
		if(!(vmp->vm_flags & VMF_INUSE))
			continue;
		region_start_iter_least(&vmp->vm_regions_avl, &iter);
		while((vr = region_get_iter(&iter))) {
			USE(vr, vr->parent = vmp;);
			region_incr_iter(&iter);
		}
	}
}