#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
#include <minix/rs.h>

#include <sys/mman.h>

#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <assert.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"

/*===========================================================================*
 *				do_rs_set_priv				     *
 *===========================================================================*/
int do_rs_set_priv(message *m)
{
	int r, n, nr;
	struct vmproc *vmp;
	bitchunk_t call_mask[VM_CALL_MASK_SIZE], *call_mask_p;

	nr = m->VM_RS_NR;

	if ((r = vm_isokendpt(nr, &n)) != OK) {
		printf("do_rs_set_priv: bad endpoint %d\n", nr);
		return EINVAL;
	}

	vmp = &vmproc[n];

	if (m->VM_RS_BUF) {
		r = sys_datacopy(m->m_source, (vir_bytes) m->VM_RS_BUF, SELF,
			(vir_bytes) call_mask, sizeof(call_mask));
		if (r != OK)
			return r;
		call_mask_p = call_mask;
	} else {
		if (m->VM_RS_SYS) {
			printf("VM: do_rs_set_priv: sys procs don't share!\n");
			return EINVAL;
		}
		call_mask_p = NULL;
	}

	acl_set(vmp, call_mask_p, m->VM_RS_SYS);

	return OK;
}

/*===========================================================================*
 *				do_rs_update				     *
 *===========================================================================*/
int do_rs_update(message *m_ptr)
{
	endpoint_t src_e, dst_e, reply_e;
	int src_p, dst_p;
	struct vmproc *src_vmp, *dst_vmp;
	int r, sys_upd_flags;

	src_e = m_ptr->m_lsys_vm_update.src;
	dst_e = m_ptr->m_lsys_vm_update.dst;
	sys_upd_flags = m_ptr->m_lsys_vm_update.flags;
	reply_e = m_ptr->m_source;

	/* Lookup slots for source and destination process. */
	if(vm_isokendpt(src_e, &src_p) != OK) {
		printf("do_rs_update: bad src endpoint %d\n", src_e);
		return EINVAL;
	}
	src_vmp = &vmproc[src_p];
	if(vm_isokendpt(dst_e, &dst_p) != OK) {
		printf("do_rs_update: bad dst endpoint %d\n", dst_e);
		return EINVAL;
	}
	dst_vmp = &vmproc[dst_p];

	/* Check flags. */
	if((sys_upd_flags & (SF_VM_ROLLBACK|SF_VM_NOMMAP)) == 0) {
		/* Can't preallocate when transferring mmapped regions. */
		if(map_region_lookup_type(dst_vmp, VR_PREALLOC_MAP)) {
			return ENOSYS;
		}
	}

	/* Let the kernel do the update first. */
	r = sys_update(src_e, dst_e,
	    sys_upd_flags & SF_VM_ROLLBACK ? SYS_UPD_ROLLBACK : 0);
	if(r != OK) {
		return r;
	}

	/* Do the update in VM now. */
	r = swap_proc_slot(src_vmp, dst_vmp);
	if(r != OK) {
		return r;
	}
	r = swap_proc_dyn_data(src_vmp, dst_vmp, sys_upd_flags);
	if(r != OK) {
		return r;
	}
	pt_bind(&src_vmp->vm_pt, src_vmp);
	pt_bind(&dst_vmp->vm_pt, dst_vmp);

	/* Reply in case of external request, update-aware. */
	if(reply_e != VM_PROC_NR) {
		if(reply_e == src_e) reply_e = dst_e;
		else if(reply_e == dst_e) reply_e = src_e;
		m_ptr->m_type = OK;
		r = ipc_send(reply_e, m_ptr);
		if(r != OK) {
			panic("ipc_send() error");
		}
	}

	return SUSPEND;
}
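/*
 * Illustrative sketch, not part of the original source: roughly how a
 * requester (typically RS) might build the live-update request that
 * do_rs_update() above handles. The m_lsys_vm_update fields mirror the ones
 * read above; the helper name request_vm_update(), the VM_RS_UPDATE call
 * number and the ipc_sendrec() usage are assumptions for illustration only.
 *
 *	static int request_vm_update(endpoint_t src_e, endpoint_t dst_e,
 *		int flags)
 *	{
 *		message m;
 *
 *		memset(&m, 0, sizeof(m));
 *		m.m_type = VM_RS_UPDATE;
 *		m.m_lsys_vm_update.src = src_e;
 *		m.m_lsys_vm_update.dst = dst_e;
 *		m.m_lsys_vm_update.flags = flags;	(e.g. SF_VM_ROLLBACK)
 *
 *		Note: do_rs_update() returns SUSPEND and replies explicitly;
 *		after the slot swap the reply goes to whichever endpoint now
 *		holds the caller's slot, which is why reply_e is remapped
 *		above before ipc_send().
 *
 *		return ipc_sendrec(VM_PROC_NR, &m);
 *	}
 */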
/*===========================================================================*
 *			rs_memctl_make_vm_instance			     *
 *===========================================================================*/
static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
{
	int r;
	u32_t flags;
	int verify;
	struct vmproc *this_vm_vmp;

	this_vm_vmp = &vmproc[VM_PROC_NR];

	pt_assert(&this_vm_vmp->vm_pt);

	/* Check if the operation is allowed. */
	assert(num_vm_instances == 1 || num_vm_instances == 2);
	if(num_vm_instances == 2) {
		printf("VM can currently support no more than 2 VM instances at a time.\n");
		return EPERM;
	}

	/* Copy settings from current VM. */
	new_vm_vmp->vm_flags |= VMF_VM_INSTANCE;
	num_vm_instances++;

	/* Pin memory for the new VM instance. */
	r = map_pin_memory(new_vm_vmp);
	if(r != OK) {
		return r;
	}

	/* Preallocate page tables for the entire address space for both
	 * VM and the new VM instance.
	 */
	flags = 0;
	verify = FALSE;
	r = pt_ptalloc_in_range(&this_vm_vmp->vm_pt,
		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
	if(r != OK) {
		return r;
	}
	r = pt_ptalloc_in_range(&new_vm_vmp->vm_pt,
		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
	if(r != OK) {
		return r;
	}

	/* Let the new VM instance map VM's page tables and its own. */
	r = pt_ptmap(this_vm_vmp, new_vm_vmp);
	if(r != OK) {
		return r;
	}
	r = pt_ptmap(new_vm_vmp, new_vm_vmp);
	if(r != OK) {
		return r;
	}

	pt_assert(&this_vm_vmp->vm_pt);
	pt_assert(&new_vm_vmp->vm_pt);

	return OK;
}

/*===========================================================================*
 *			rs_memctl_heap_prealloc				     *
 *===========================================================================*/
static int rs_memctl_heap_prealloc(struct vmproc *vmp,
	vir_bytes *addr, size_t *len)
{
	struct vir_region *data_vr;
	vir_bytes bytes;

	if(*len <= 0) {
		return EINVAL;
	}
	data_vr = region_search(&vmp->vm_regions_avl, VM_MMAPBASE, AVL_LESS);
	*addr = data_vr->vaddr + data_vr->length;
	bytes = *addr + *len;

	return real_brk(vmp, bytes);
}

/*===========================================================================*
 *			rs_memctl_map_prealloc				     *
 *===========================================================================*/
static int rs_memctl_map_prealloc(struct vmproc *vmp,
	vir_bytes *addr, size_t *len)
{
	struct vir_region *vr;
	vir_bytes base, top;
	int is_vm;

	if(*len <= 0) {
		return EINVAL;
	}
	*len = CLICK_CEIL(*len);

	is_vm = (vmp->vm_endpoint == VM_PROC_NR);
	base = is_vm ? VM_OWN_MMAPBASE : VM_MMAPBASE;
	top = is_vm ? VM_OWN_MMAPTOP : VM_MMAPTOP;

	if (!(vr = map_page_region(vmp, base, top, *len,
		VR_ANON|VR_WRITABLE|VR_UNINITIALIZED, MF_PREALLOC,
		&mem_type_anon))) {
		return ENOMEM;
	}
	vr->flags |= VR_PREALLOC_MAP;
	*addr = vr->vaddr;
	return OK;
}
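/*
 * Illustrative note, not part of the original source: rs_memctl_map_prealloc()
 * rounds the requested length up to a whole number of clicks, so, assuming the
 * common 4 KiB click size, a request of *len == 5000 comes back as 8192. The
 * resulting region is tagged VR_PREALLOC_MAP and placed in the mmap range
 * (VM_OWN_MMAPBASE..VM_OWN_MMAPTOP when the target is VM itself, otherwise
 * VM_MMAPBASE..VM_MMAPTOP), and its address is returned through *addr.
 */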
/*===========================================================================*
 *			rs_memctl_get_prealloc_map			     *
 *===========================================================================*/
static int rs_memctl_get_prealloc_map(struct vmproc *vmp,
	vir_bytes *addr, size_t *len)
{
	struct vir_region *vr;

	vr = map_region_lookup_type(vmp, VR_PREALLOC_MAP);
	if(!vr) {
		*addr = 0;
		*len = 0;
	}
	else {
		*addr = vr->vaddr;
		*len = vr->length;
	}
	return OK;
}

/*===========================================================================*
 *				do_rs_memctl				     *
 *===========================================================================*/
int do_rs_memctl(message *m_ptr)
{
	endpoint_t ep;
	int req, r, proc_nr;
	struct vmproc *vmp;

	ep = m_ptr->VM_RS_CTL_ENDPT;
	req = m_ptr->VM_RS_CTL_REQ;

	/* Lookup endpoint. */
	if ((r = vm_isokendpt(ep, &proc_nr)) != OK) {
		printf("do_rs_memctl: bad endpoint %d\n", ep);
		return EINVAL;
	}
	vmp = &vmproc[proc_nr];

	/* Process request. */
	switch(req)
	{
	case VM_RS_MEM_PIN:
		/* Only actually pin RS memory if VM can recover from
		 * crashes (saves memory).
		 */
		if (num_vm_instances <= 1)
			return OK;
		r = map_pin_memory(vmp);
		return r;
	case VM_RS_MEM_MAKE_VM:
		r = rs_memctl_make_vm_instance(vmp);
		return r;
	case VM_RS_MEM_HEAP_PREALLOC:
		r = rs_memctl_heap_prealloc(vmp,
			(vir_bytes *) &m_ptr->VM_RS_CTL_ADDR,
			(size_t *) &m_ptr->VM_RS_CTL_LEN);
		return r;
	case VM_RS_MEM_MAP_PREALLOC:
		r = rs_memctl_map_prealloc(vmp,
			(vir_bytes *) &m_ptr->VM_RS_CTL_ADDR,
			(size_t *) &m_ptr->VM_RS_CTL_LEN);
		return r;
	case VM_RS_MEM_GET_PREALLOC_MAP:
		r = rs_memctl_get_prealloc_map(vmp,
			(vir_bytes *) &m_ptr->VM_RS_CTL_ADDR,
			(size_t *) &m_ptr->VM_RS_CTL_LEN);
		return r;
	default:
		printf("do_rs_memctl: bad request %d\n", req);
		return EINVAL;
	}
}
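/*
 * Illustrative sketch, not part of the original source: roughly how a caller
 * such as RS might drive do_rs_memctl() above. VM_RS_MEMCTL is assumed to be
 * the call number dispatched to do_rs_memctl(); the helper name
 * request_memctl() and the ipc_sendrec() usage are illustration only. Results
 * are assumed to come back in the reply message because the handlers above
 * write through &m_ptr->VM_RS_CTL_ADDR and &m_ptr->VM_RS_CTL_LEN before VM
 * replies.
 *
 *	static int request_memctl(endpoint_t ep, int req,
 *		vir_bytes *addr, size_t *len)
 *	{
 *		message m;
 *		int r;
 *
 *		memset(&m, 0, sizeof(m));
 *		m.m_type = VM_RS_MEMCTL;
 *		m.VM_RS_CTL_ENDPT = ep;
 *		m.VM_RS_CTL_REQ = req;		(e.g. VM_RS_MEM_MAP_PREALLOC)
 *		m.VM_RS_CTL_LEN = *len;
 *
 *		r = ipc_sendrec(VM_PROC_NR, &m);
 *		if(r == OK) {
 *			*addr = (vir_bytes) m.VM_RS_CTL_ADDR;
 *			*len = (size_t) m.VM_RS_CTL_LEN;
 *		}
 *		return r;
 *	}
 */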