/* The kernel call implemented in this file:
 *   m_type:	SYS_VMCTL
 *
 * The parameters for this kernel call are:
 *   	SVMCTL_WHO	which process
 *    	SVMCTL_PARAM	set this setting (VMCTL_*)
 *    	SVMCTL_VALUE	to this value
 */

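/*
 * Usage sketch: how a service such as VM might issue this call. The
 * libsys wrapper name sys_vmctl(who, param, value) is an assumption
 * here; whatever the wrapper, it fills in the SVMCTL_* message fields
 * listed above and traps into the kernel with SYS_VMCTL.
 *
 *	int r = sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0);
 *	if (r != OK)
 *		panic("sys_vmctl failed: %d", r);
 */
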
#include "kernel/system.h"
#include "kernel/vm.h"
#include <assert.h>

/*===========================================================================*
 *				do_vmctl				     *
 *===========================================================================*/
int do_vmctl(struct proc * caller, message * m_ptr)
{
  int proc_nr;
  endpoint_t ep = m_ptr->SVMCTL_WHO;
  struct proc *p, *rp, **rpp, *target;

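  /* SELF refers to the calling process, i.e. VM itself. */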
  if(ep == SELF) { ep = caller->p_endpoint; }

  if(!isokendpt(ep, &proc_nr)) {
	printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
	return EINVAL;
  }

  p = proc_addr(proc_nr);

  switch(m_ptr->SVMCTL_PARAM) {
	case VMCTL_CLEAR_PAGEFAULT:
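		/* VM has resolved the page fault; let the process run again
		 * once no other RTS flags remain set.
		 */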
		assert(RTS_ISSET(p,RTS_PAGEFAULT));
		RTS_UNSET(p, RTS_PAGEFAULT);
		return OK;
	case VMCTL_MEMREQ_GET:
		/* Send VM the information about the memory request. We
		 * cannot simply send the first request on the list, because
		 * IPC filters may forbid VM from getting requests for
		 * particular sources. However, IPC filters are used only in
		 * rare cases.
		 */
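		/* Walk the chain through a pointer-to-pointer so a matching
		 * entry can be unlinked in place, wherever it sits.
		 */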
		for (rpp = &vmrequest; *rpp != NULL;
		    rpp = &(*rpp)->p_vmrequest.nextrequestor) {
			rp = *rpp;

			assert(RTS_ISSET(rp, RTS_VMREQUEST));

			okendpt(rp->p_vmrequest.target, &proc_nr);
			target = proc_addr(proc_nr);

			/* Check against IPC filters. */
			if (!allow_ipc_filtered_memreq(rp, target))
				continue;

			/* Reply with request fields. */
			if (rp->p_vmrequest.req_type != VMPTYPE_CHECK)
				panic("VMREQUEST wrong type");

			m_ptr->SVMCTL_MRG_TARGET	=
				rp->p_vmrequest.target;
			m_ptr->SVMCTL_MRG_ADDR		=
				rp->p_vmrequest.params.check.start;
			m_ptr->SVMCTL_MRG_LENGTH	=
				rp->p_vmrequest.params.check.length;
			m_ptr->SVMCTL_MRG_FLAG		=
				rp->p_vmrequest.params.check.writeflag;
			m_ptr->SVMCTL_MRG_REQUESTOR	=
				(void *) rp->p_endpoint;

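			/* Mark the request as pending; VM completes it later
			 * with VMCTL_MEMREQ_REPLY.
			 */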
			rp->p_vmrequest.vmresult = VMSUSPEND;

			/* Remove from request chain. */
			*rpp = rp->p_vmrequest.nextrequestor;

			return rp->p_vmrequest.req_type;
		}

		return ENOENT;

	case VMCTL_MEMREQ_REPLY:
		assert(RTS_ISSET(p, RTS_VMREQUEST));
		assert(p->p_vmrequest.vmresult == VMSUSPEND);
		okendpt(p->p_vmrequest.target, &proc_nr);
		target = proc_addr(proc_nr);
		p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
		assert(p->p_vmrequest.vmresult != VMSUSPEND);

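		/* Resume the requestor according to how it was suspended. */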
		switch(p->p_vmrequest.type) {
		case VMSTYPE_KERNELCALL:
			/*
			 * We will have to resume execution of the kernel call
			 * as soon as the scheduler picks up this process
			 * again.
			 */
			p->p_misc_flags |= MF_KCALL_RESUME;
			break;
		case VMSTYPE_DELIVERMSG:
			assert(p->p_misc_flags & MF_DELIVERMSG);
			assert(p == target);
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		case VMSTYPE_MAP:
			assert(RTS_ISSET(p, RTS_VMREQUEST));
			break;
		default:
			panic("strange request type: %d", p->p_vmrequest.type);
		}

		RTS_UNSET(p, RTS_VMREQUEST);
		return OK;

	case VMCTL_KERN_PHYSMAP:
	{
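		/* VM iterates over the kernel's physical mappings by index
		 * (SVMCTL_VALUE) to learn what it must map in for the kernel.
		 */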
		int i = m_ptr->SVMCTL_VALUE;
		return arch_phys_map(i,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
			(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
			&m_ptr->SVMCTL_MAP_FLAGS);
	}
	case VMCTL_KERN_MAP_REPLY:
	{
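		/* VM reports the virtual address at which it mapped the
		 * entry with index SVMCTL_VALUE.
		 */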
		return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
			(vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
	}
	case VMCTL_VMINHIBIT_SET:
		/* check if we must stop a process on a different CPU */
#if CONFIG_SMP
		if (p->p_cpu != cpuid) {
			smp_schedule_vminhibit(p);
		} else
#endif
			RTS_SET(p, RTS_VMINHIBIT);
#if CONFIG_SMP
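		/* The mappings may change while the process is inhibited;
		 * make sure stale TLB entries are flushed before it runs
		 * again.
		 */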
		p->p_misc_flags |= MF_FLUSH_TLB;
#endif
		return OK;
	case VMCTL_VMINHIBIT_CLEAR:
		assert(RTS_ISSET(p, RTS_VMINHIBIT));
		/*
		 * The process is certainly not runnable, so there is no need
		 * to tell its CPU.
		 */
		RTS_UNSET(p, RTS_VMINHIBIT);
#ifdef CONFIG_SMP
		if (p->p_misc_flags & MF_SENDA_VM_MISS) {
			struct priv *privp;
			p->p_misc_flags &= ~MF_SENDA_VM_MISS;
			privp = priv(p);
			try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
							privp->s_asynsize);
		}
		/*
		 * We don't know whether the kernel has the changed mapping
		 * installed to access userspace memory, nor, if so, on which
		 * CPU. Moreover, we don't know which mapping has changed and
		 * how, so we must invalidate all mappings we have anywhere.
		 * Next time we map memory, we map it fresh.
		 */
		bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
		return OK;
	case VMCTL_CLEARMAPCACHE:
		/* VM says: forget about old mappings we have cached. */
		mem_clear_mapcache();
		return OK;
	case VMCTL_BOOTINHIBIT_CLEAR:
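		/* VM has finished setting up this boot-time process; allow
		 * it to run.
		 */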
		RTS_UNSET(p, RTS_BOOTINHIBIT);
		return OK;
  }

  /* Try architecture-specific vmctls. */
  return arch_do_vmctl(m_ptr, p);
}