/* This file contains essentially all of the process and message handling.
 * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
 * There is one entry point from the outside:
 *
 *   sys_call:       a system call, i.e., the kernel is trapped with an INT
 *
 * Changes:
 *   Aug 19, 2005   rewrote scheduling code               (Jorrit N. Herder)
 *   Jul 25, 2005   rewrote system call handling          (Jorrit N. Herder)
 *   May 26, 2005   rewrote message passing functions     (Jorrit N. Herder)
 *   May 24, 2005   new notification system call          (Jorrit N. Herder)
 *   Oct 28, 2004   nonblocking send and receive calls    (Jorrit N. Herder)
 *
 * The code here is critical to make everything work and is important for the
 * overall performance of the system. A large fraction of the code deals with
 * list manipulation. To make this both easy to understand and fast to execute,
 * pointer pointers are used throughout the code. Pointer pointers prevent
 * exceptions for the head or tail of a linked list.
 *
 *   node_t *queue, *new_node;    // assume these as global variables
 *   node_t **xpp = &queue;       // get pointer pointer to head of queue
 *   while (*xpp != NULL)         // find last pointer of the linked list
 *       xpp = &(*xpp)->next;     // get pointer to next pointer
 *   *xpp = new_node;             // now replace the end (the NULL pointer)
 *   new_node->next = NULL;       // and mark the new end of the list
 *
 * For example, when adding a new node to the end of the list, one normally
 * makes an exception for an empty list and looks up the end of the list for
 * nonempty lists. As shown above, this is not required with pointer pointers.
 */

#include <stddef.h>
#include <signal.h>
#include <assert.h>
#include <string.h>

#include "vm.h"
#include "clock.h"
#include "spinlock.h"
#include "arch_proto.h"

#include <minix/syslib.h>

/* Scheduling and message passing functions */
static void idle(void);
/**
 * Made public for use in clock.c (for user-space scheduling)
static int mini_send(struct proc *caller_ptr, endpoint_t dst_e, message
  *m_ptr, int flags);
*/
static int mini_receive(struct proc *caller_ptr, endpoint_t src,
  message *m_buff_usr, int flags);
static int mini_senda(struct proc *caller_ptr, asynmsg_t *table, size_t
  size);
static int deadlock(int function, register struct proc *caller,
  endpoint_t src_dst_e);
static int try_async(struct proc *caller_ptr);
static int try_one(endpoint_t receive_e, struct proc *src_ptr,
  struct proc *dst_ptr);
static struct proc * pick_proc(void);
static void enqueue_head(struct proc *rp);

/* all idles share the same idle_priv structure */
static struct priv idle_priv;

static void set_idle_name(char * name, int n)
{
  int i, c;
  int p_z = 0;

  if (n > 999)
    n = 999;

  name[0] = 'i';
  name[1] = 'd';
  name[2] = 'l';
  name[3] = 'e';

  for (i = 4, c = 100; c > 0; c /= 10) {
    int digit;

    digit = n / c;
    n -= digit * c;

    if (p_z || digit != 0 || c == 1) {
      p_z = 1;
      name[i++] = '0' + digit;
    }
  }

  name[i] = '\0';

}
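/* For reference: set_idle_name() above prints the CPU number without leading
 * zeros and clamps it to three digits, e.g. n == 7 yields "idle7" and
 * n == 1234 yields "idle999".
 */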


#define PICK_ANY        1
#define PICK_HIGHERONLY 2

#define BuildNotifyMessage(m_ptr, src, dst_ptr) \
  memset((m_ptr), 0, sizeof(*(m_ptr)));                 \
  (m_ptr)->m_type = NOTIFY_MESSAGE;                     \
  (m_ptr)->m_notify.timestamp = get_monotonic();        \
  switch (src) {                                        \
  case HARDWARE:                                        \
    (m_ptr)->m_notify.interrupts =                      \
      priv(dst_ptr)->s_int_pending;                     \
    priv(dst_ptr)->s_int_pending = 0;                   \
    break;                                              \
  case SYSTEM:                                          \
    memcpy(&(m_ptr)->m_notify.sigset,                   \
      &priv(dst_ptr)->s_sig_pending,                    \
      sizeof(sigset_t));                                \
    sigemptyset(&priv(dst_ptr)->s_sig_pending);         \
    break;                                              \
  }
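/* BuildNotifyMessage() assembles a notification in place: it zeroes the
 * message, stamps it with the monotonic clock, and fills in either the
 * pending interrupt bits (source HARDWARE) or the pending signal set (source
 * SYSTEM) from the destination's privilege structure, clearing them there as
 * a side effect. The m_notify_buff skeleton below is passed to CANRECEIVE()
 * in mini_receive() when checking whether a pending notification may be
 * delivered to the caller.
 */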

static message m_notify_buff = { 0, NOTIFY_MESSAGE };

void proc_init(void)
{
  struct proc * rp;
  struct priv *sp;
  int i;

  /* Clear the process table. Announce each slot as empty and set up
   * mappings for proc_addr() and proc_nr() macros. Do the same for the
   * table with privilege structures for the system processes.
   */
  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
    rp->p_rts_flags = RTS_SLOT_FREE;          /* initialize free slot */
    rp->p_magic = PMAGIC;
    rp->p_nr = i;                             /* proc number from ptr */
    rp->p_endpoint = _ENDPOINT(0, rp->p_nr);  /* generation no. 0 */
    rp->p_scheduler = NULL;                   /* no user space scheduler */
    rp->p_priority = 0;                       /* no priority */
    rp->p_quantum_size_ms = 0;                /* no quantum size */

    /* arch-specific initialization */
    arch_proc_reset(rp);
  }
  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
    sp->s_proc_nr = NONE;                     /* initialize as free */
    sp->s_id = (sys_id_t) i;                  /* priv structure index */
    ppriv_addr[i] = sp;                       /* priv ptr from number */
    sp->s_sig_mgr = NONE;                     /* clear signal managers */
    sp->s_bak_sig_mgr = NONE;
  }

  idle_priv.s_flags = IDL_F;
  /* initialize IDLE structures for every CPU */
  for (i = 0; i < CONFIG_MAX_CPUS; i++) {
    struct proc * ip = get_cpu_var_ptr(i, idle_proc);
    ip->p_endpoint = IDLE;
    ip->p_priv = &idle_priv;
    /* must not let idle ever get scheduled */
    ip->p_rts_flags |= RTS_PROC_STOP;
    set_idle_name(ip->p_name, i);
  }
}
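/* After proc_init() each process slot rp satisfies proc_addr(rp->p_nr) == rp
 * and carries a generation-0 endpoint, each privilege structure is reachable
 * through ppriv_addr[] by its index, and the per-CPU idle processes all share
 * idle_priv and stay marked RTS_PROC_STOP so they are never scheduled.
 */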

static void switch_address_space_idle(void)
{
#ifdef CONFIG_SMP
  /*
   * currently we bet that VM is always alive and its pages available so
   * when the CPU wakes up the kernel is mapped and no surprises happen.
   * This is only a problem if more than one CPU is available.
   */
  switch_address_space(proc_addr(VM_PROC_NR));
#endif
}

/*===========================================================================*
 *                              idle                                         *
 *===========================================================================*/
static void idle(void)
{
  struct proc * p;

  /* This function is called whenever there is no work to do.
   * Halt the CPU, and measure how many timestamp counter ticks are
   * spent not doing anything. This allows test setups to measure
   * the CPU utilization of certain workloads with high precision.
   */

  p = get_cpulocal_var(proc_ptr) = get_cpulocal_var_ptr(idle_proc);
  if (priv(p)->s_flags & BILLABLE)
    get_cpulocal_var(bill_ptr) = p;

  switch_address_space_idle();

#ifdef CONFIG_SMP
  get_cpulocal_var(cpu_is_idle) = 1;
  /* we don't need to keep time on APs as it is handled on the BSP */
  if (cpuid != bsp_cpu_id)
    stop_local_timer();
  else
#endif
  {
    /*
     * If the timer has expired while in the kernel we must
     * rearm it before we go to sleep.
     */
    restart_local_timer();
  }

  /* start accounting for the idle time */
  context_stop(proc_addr(KERNEL));
#if !SPROFILE
  halt_cpu();
#else
  if (!sprofiling)
    halt_cpu();
  else {
    volatile int * v;

    v = get_cpulocal_var_ptr(idle_interrupted);
    interrupts_enable();
    while (!*v)
      arch_pause();
    interrupts_disable();
    *v = 0;
  }
#endif
  /*
   * The end of accounting for the idle task does not happen here; the kernel
   * handles other work for quite a while before it gets back here!
   */
}

/*===========================================================================*
 *                              vm_suspend                                   *
 *===========================================================================*/
void vm_suspend(struct proc *caller, const struct proc *target,
  const vir_bytes linaddr, const vir_bytes len, const int type,
  const int writeflag)
{
  /* This range is not OK for this process. Set parameters
   * of the request and notify VM about the pending request.
   */
  assert(!RTS_ISSET(caller, RTS_VMREQUEST));
  assert(!RTS_ISSET(target, RTS_VMREQUEST));

  RTS_SET(caller, RTS_VMREQUEST);

  caller->p_vmrequest.req_type = VMPTYPE_CHECK;
  caller->p_vmrequest.target = target->p_endpoint;
  caller->p_vmrequest.params.check.start = linaddr;
  caller->p_vmrequest.params.check.length = len;
  caller->p_vmrequest.params.check.writeflag = writeflag;
  caller->p_vmrequest.type = type;

  /* Connect caller on vmrequest wait queue. */
  if(!(caller->p_vmrequest.nextrequestor = vmrequest))
    if(OK != send_sig(VM_PROC_NR, SIGKMEM))
      panic("send_sig failed");
  vmrequest = caller;
}
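/* The vmrequest wait queue built above is a simple LIFO: the caller is pushed
 * on top through p_vmrequest.nextrequestor, and SIGKMEM is sent to VM only
 * when the queue was empty before, so VM is signalled just once while
 * requests are already outstanding.
 */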

/*===========================================================================*
 *                              delivermsg                                   *
 *===========================================================================*/
static void delivermsg(struct proc *rp)
{
  assert(!RTS_ISSET(rp, RTS_VMREQUEST));
  assert(rp->p_misc_flags & MF_DELIVERMSG);
  assert(rp->p_delivermsg.m_source != NONE);

  if (copy_msg_to_user(&rp->p_delivermsg,
      (message *) rp->p_delivermsg_vir)) {
    if(rp->p_misc_flags & MF_MSGFAILED) {
      /* 2nd consecutive failure means this won't succeed */
      printf("WARNING wrong user pointer 0x%08lx from "
        "process %s / %d\n",
        rp->p_delivermsg_vir,
        rp->p_name,
        rp->p_endpoint);
      cause_sig(rp->p_nr, SIGSEGV);
    } else {
      /* 1st failure means we have to ask VM to handle it */
      vm_suspend(rp, rp, rp->p_delivermsg_vir,
        sizeof(message), VMSTYPE_DELIVERMSG, 1);
      rp->p_misc_flags |= MF_MSGFAILED;
    }
  } else {
    /* Indicate message has been delivered; address is 'used'. */
    rp->p_delivermsg.m_source = NONE;
    rp->p_misc_flags &= ~(MF_DELIVERMSG|MF_MSGFAILED);

    if(!(rp->p_misc_flags & MF_CONTEXT_SET)) {
      rp->p_reg.retreg = OK;
    }
  }
}
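/* delivermsg() thus implements a two-strike policy: the first copy failure
 * suspends the receiver on VM and records MF_MSGFAILED, while a second
 * consecutive failure is treated as a bad user pointer and punished with
 * SIGSEGV.
 */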

/*===========================================================================*
 *                              switch_to_user                               *
 *===========================================================================*/
void switch_to_user(void)
{
  /* This function is called an instant before proc_ptr is
   * to be scheduled again.
   */
  struct proc * p;
#ifdef CONFIG_SMP
  int tlb_must_refresh = 0;
#endif

  p = get_cpulocal_var(proc_ptr);
  /*
   * if the current process is still runnable, check the misc flags and let
   * it run unless it becomes not runnable in the meantime
   */
  if (proc_is_runnable(p))
    goto check_misc_flags;
  /*
   * if a process becomes not runnable while handling the misc flags, we
   * need to pick a new one here and start from scratch. Also if the
   * current process wasn't runnable, we pick a new one here
   */
not_runnable_pick_new:
  if (proc_is_preempted(p)) {
    p->p_rts_flags &= ~RTS_PREEMPTED;
    if (proc_is_runnable(p)) {
      if (p->p_cpu_time_left)
        enqueue_head(p);
      else
        enqueue(p);
    }
  }

  /*
   * if we have no process to run, set IDLE as the current process for
   * time accounting and put the cpu in an idle state. After the next
   * timer interrupt the execution resumes here and we can pick another
   * process. If there is still nothing runnable we "schedule" IDLE again
   */
  while (!(p = pick_proc())) {
    idle();
  }

  /* update the global variable */
  get_cpulocal_var(proc_ptr) = p;

#ifdef CONFIG_SMP
  if (p->p_misc_flags & MF_FLUSH_TLB && get_cpulocal_var(ptproc) == p)
    tlb_must_refresh = 1;
#endif
  switch_address_space(p);

check_misc_flags:

  assert(p);
  assert(proc_is_runnable(p));
  while (p->p_misc_flags &
      (MF_KCALL_RESUME | MF_DELIVERMSG |
       MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {

    assert(proc_is_runnable(p));
    if (p->p_misc_flags & MF_KCALL_RESUME) {
      kernel_call_resume(p);
    }
    else if (p->p_misc_flags & MF_DELIVERMSG) {
      TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
        p->p_name, p->p_endpoint););
      delivermsg(p);
    }
    else if (p->p_misc_flags & MF_SC_DEFER) {
      /* Perform the system call that we deferred earlier. */

      assert (!(p->p_misc_flags & MF_SC_ACTIVE));

      arch_do_syscall(p);

      /* If the process is stopped for signal delivery, and
       * not blocked sending a message after the system call,
       * inform PM.
       */
      if ((p->p_misc_flags & MF_SIG_DELAY) &&
          !RTS_ISSET(p, RTS_SENDING))
        sig_delay_done(p);
    }
    else if (p->p_misc_flags & MF_SC_TRACE) {
      /* Trigger a system call leave event if this was a
       * system call. We must do this after processing the
       * other flags above, both for tracing correctness and
       * to be able to use 'break'.
       */
      if (!(p->p_misc_flags & MF_SC_ACTIVE))
        break;

      p->p_misc_flags &=
        ~(MF_SC_TRACE | MF_SC_ACTIVE);

      /* Signal the "leave system call" event.
       * Block the process.
       */
      cause_sig(proc_nr(p), SIGTRAP);
    }
    else if (p->p_misc_flags & MF_SC_ACTIVE) {
      /* If MF_SC_ACTIVE was set, remove it now:
       * we're leaving the system call.
       */
      p->p_misc_flags &= ~MF_SC_ACTIVE;

      break;
    }

    /*
     * the selected process might not be runnable anymore. We have
     * to check it and schedule another one
     */
    if (!proc_is_runnable(p))
      goto not_runnable_pick_new;
  }
  /*
   * check the quantum left before it runs again. We must do it only here
   * as we are sure that a possible out-of-quantum message to the
   * scheduler will not collide with the regular ipc
   */
  if (!p->p_cpu_time_left)
    proc_no_time(p);
  /*
   * After handling the misc flags the selected process might not be
   * runnable anymore. We have to check it and schedule another one
   */
  if (!proc_is_runnable(p))
    goto not_runnable_pick_new;

  TRACE(VF_SCHEDULING, printf("cpu %d starting %s / %d "
    "pc 0x%08x\n",
    cpuid, p->p_name, p->p_endpoint, p->p_reg.pc););
#if DEBUG_TRACE
  p->p_schedules++;
#endif

  p = arch_finish_switch_to_user();
  assert(p->p_cpu_time_left);

  context_stop(proc_addr(KERNEL));

  /* If the process isn't the owner of FPU, enable the FPU exception */
  if (get_cpulocal_var(fpu_owner) != p)
    enable_fpu_exception();
  else
    disable_fpu_exception();

  /* If MF_CONTEXT_SET is set, don't clobber process state within
   * the kernel. The next kernel entry is OK again though.
   */
  p->p_misc_flags &= ~MF_CONTEXT_SET;

#if defined(__i386__)
  assert(p->p_seg.p_cr3 != 0);
#elif defined(__arm__)
  assert(p->p_seg.p_ttbr != 0);
#endif
#ifdef CONFIG_SMP
  if (p->p_misc_flags & MF_FLUSH_TLB) {
    if (tlb_must_refresh)
      refresh_tlb();
    p->p_misc_flags &= ~MF_FLUSH_TLB;
  }
#endif

  restart_local_timer();

  /*
   * restore_user_context() carries out the actual mode switch from kernel
   * to userspace. This function does not return.
   */
  restore_user_context(p);
  NOT_REACHABLE;
}

/*
 * handler for all synchronous IPC calls
 */
static int do_sync_ipc(struct proc * caller_ptr, /* who made the call */
      int call_nr,              /* system call number and flags */
      endpoint_t src_dst_e,     /* src or dst of the call */
      message *m_ptr)           /* user's pointer to a message */
{
  int result;                   /* the system call's result */
  int src_dst_p;                /* Process slot number */
  char *callname;

  /* Check destination. RECEIVE is the only call that accepts ANY (in addition
   * to a real endpoint). The other calls (SEND, SENDNB, SENDREC, and NOTIFY)
   * require an endpoint to correspond to a process. In addition, it is
   * necessary to check whether a process is allowed to send to a given
   * destination.
   */
  assert(call_nr != SENDA);

  /* Only allow non-negative call_nr values less than 32 */
  if (call_nr < 0 || call_nr > IPCNO_HIGHEST || call_nr >= 32
      || !(callname = ipc_call_names[call_nr])) {
#if DEBUG_ENABLE_IPC_WARNINGS
    printf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
      call_nr, proc_nr(caller_ptr), src_dst_e);
#endif
    return(ETRAPDENIED);        /* trap denied by mask or kernel */
  }

  if (src_dst_e == ANY)
  {
    if (call_nr != RECEIVE)
    {
#if 0
      printf("sys_call: %s by %d with bad endpoint %d\n",
        callname,
        proc_nr(caller_ptr), src_dst_e);
#endif
      return EINVAL;
    }
    src_dst_p = (int) src_dst_e;
  }
  else
  {
    /* Require a valid source and/or destination process. */
    if(!isokendpt(src_dst_e, &src_dst_p)) {
#if 0
      printf("sys_call: %s by %d with bad endpoint %d\n",
        callname,
        proc_nr(caller_ptr), src_dst_e);
#endif
      return EDEADSRCDST;
    }

    /* If the call is to send to a process, i.e., for SEND, SENDNB,
     * SENDREC or NOTIFY, verify that the caller is allowed to send to
     * the given destination.
     */
    if (call_nr != RECEIVE)
    {
      if (!may_send_to(caller_ptr, src_dst_p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
        printf(
        "sys_call: ipc mask denied %s from %d to %d\n",
          callname,
          caller_ptr->p_endpoint, src_dst_e);
#endif
        return(ECALLDENIED);    /* call denied by ipc mask */
      }
    }
  }

  /* Check if the process has privileges for the requested call. Calls to the
   * kernel may only be SENDREC, because tasks always reply and may not block
   * if the caller doesn't do receive().
   */
  if (!(priv(caller_ptr)->s_trap_mask & (1 << call_nr))) {
#if DEBUG_ENABLE_IPC_WARNINGS
    printf("sys_call: %s not allowed, caller %d, src_dst %d\n",
      callname, proc_nr(caller_ptr), src_dst_p);
#endif
    return(ETRAPDENIED);        /* trap denied by mask or kernel */
  }

  if (call_nr != SENDREC && call_nr != RECEIVE && iskerneln(src_dst_p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
    printf("sys_call: trap %s not allowed, caller %d, src_dst %d\n",
      callname, proc_nr(caller_ptr), src_dst_e);
#endif
    return(ETRAPDENIED);        /* trap denied by mask or kernel */
  }

  switch(call_nr) {
  case SENDREC:
    /* A flag is set so that notifications cannot interrupt SENDREC. */
    caller_ptr->p_misc_flags |= MF_REPLY_PEND;
    /* fall through */
  case SEND:
    result = mini_send(caller_ptr, src_dst_e, m_ptr, 0);
    if (call_nr == SEND || result != OK)
      break;                    /* done, or SEND failed */
    /* fall through for SENDREC */
  case RECEIVE:
    if (call_nr == RECEIVE) {
      caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
      IPC_STATUS_CLEAR(caller_ptr);     /* clear IPC status code */
    }
    result = mini_receive(caller_ptr, src_dst_e, m_ptr, 0);
    break;
  case NOTIFY:
    result = mini_notify(caller_ptr, src_dst_e);
    break;
  case SENDNB:
    result = mini_send(caller_ptr, src_dst_e, m_ptr, NON_BLOCKING);
    break;
  default:
    result = EBADCALL;          /* illegal system call */
  }

  /* Now, return the result of the system call to the caller. */
  return(result);
}
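/* Note how SENDREC relies on the fall-through in the switch above:
 * MF_REPLY_PEND is set, mini_send() runs first, and only when it returns OK
 * does execution continue into the RECEIVE case to wait for the reply. The
 * flag stays set during that receive, which is what keeps notifications from
 * interrupting the rendezvous.
 */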

int do_ipc(reg_t r1, reg_t r2, reg_t r3)
{
  struct proc *const caller_ptr = get_cpulocal_var(proc_ptr); /* get pointer to caller */
  int call_nr = (int) r1;

  assert(!RTS_ISSET(caller_ptr, RTS_SLOT_FREE));

  /* bill kernel time to this process. */
  kbill_ipc = caller_ptr;

  /* If this process is subject to system call tracing, handle that first. */
  if (caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) {
    /* Are we tracing this process, and is it the first sys_call entry? */
    if ((caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) ==
        MF_SC_TRACE) {
      /* We must notify the tracer before processing the actual
       * system call. If we don't, the tracer could not obtain the
       * input message. Postpone the entire system call.
       */
      caller_ptr->p_misc_flags &= ~MF_SC_TRACE;
      assert(!(caller_ptr->p_misc_flags & MF_SC_DEFER));
      caller_ptr->p_misc_flags |= MF_SC_DEFER;
      caller_ptr->p_defer.r1 = r1;
      caller_ptr->p_defer.r2 = r2;
      caller_ptr->p_defer.r3 = r3;

      /* Signal the "enter system call" event. Block the process. */
      cause_sig(proc_nr(caller_ptr), SIGTRAP);

      /* Preserve the return register's value. */
      return caller_ptr->p_reg.retreg;
    }

    /* If the MF_SC_DEFER flag is set, the syscall is now being resumed. */
    caller_ptr->p_misc_flags &= ~MF_SC_DEFER;

    assert (!(caller_ptr->p_misc_flags & MF_SC_ACTIVE));

    /* Set a flag to allow reliable tracing of leaving the system call. */
    caller_ptr->p_misc_flags |= MF_SC_ACTIVE;
  }

  if(caller_ptr->p_misc_flags & MF_DELIVERMSG) {
    panic("sys_call: MF_DELIVERMSG on for %s / %d\n",
      caller_ptr->p_name, caller_ptr->p_endpoint);
  }

  /* Now check if the call is known and try to perform the request. The only
   * system calls that exist in MINIX are sending and receiving messages.
   *   - SENDREC: combines SEND and RECEIVE in a single system call
   *   - SEND:    sender blocks until its message has been delivered
   *   - RECEIVE: receiver blocks until an acceptable message has arrived
   *   - NOTIFY:  asynchronous call; deliver notification or mark pending
   *   - SENDA:   list of asynchronous send requests
   */
  switch(call_nr) {
  case SENDREC:
  case SEND:
  case RECEIVE:
  case NOTIFY:
  case SENDNB:
  {
    /* Process accounting for scheduling */
    caller_ptr->p_accounting.ipc_sync++;

    return do_sync_ipc(caller_ptr, call_nr, (endpoint_t) r2,
      (message *) r3);
  }
  case SENDA:
  {
    /*
     * Get and check the size of the argument in bytes as it is a
     * table
     */
    size_t msg_size = (size_t) r2;

    /* Process accounting for scheduling */
    caller_ptr->p_accounting.ipc_async++;

    /* Limit size to something reasonable. An arbitrary choice is 16
     * times the number of process table entries.
     */
    if (msg_size > 16*(NR_TASKS + NR_PROCS))
      return EDOM;
    return mini_senda(caller_ptr, (asynmsg_t *) r3, msg_size);
  }
  case MINIX_KERNINFO:
  {
    /* It might not be initialized yet. */
    if(!minix_kerninfo_user) {
      return EBADCALL;
    }

    arch_set_secondary_ipc_return(caller_ptr, minix_kerninfo_user);
    return OK;
  }
  default:
    return EBADCALL;            /* illegal system call */
  }
}

/*===========================================================================*
 *                              deadlock                                     *
 *===========================================================================*/
static int deadlock(
  int function,                         /* trap number */
  register struct proc *cp,             /* pointer to caller */
  endpoint_t src_dst_e                  /* src or dst process */
)
{
  /* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
   * a cyclic dependency of blocking send and receive calls. The only cyclic
   * dependency that is not fatal is if the caller and target directly SEND(REC)
   * and RECEIVE to each other. If a deadlock is found, the group size is
   * returned. Otherwise zero is returned.
   */
  register struct proc *xp;             /* process pointer */
  int group_size = 1;                   /* start with only caller */
#if DEBUG_ENABLE_IPC_WARNINGS
  static struct proc *processes[NR_PROCS + NR_TASKS];
  processes[0] = cp;
#endif

  while (src_dst_e != ANY) {            /* check while process nr */
    int src_dst_slot;
    okendpt(src_dst_e, &src_dst_slot);
    xp = proc_addr(src_dst_slot);       /* follow chain of processes */
    assert(proc_ptr_ok(xp));
    assert(!RTS_ISSET(xp, RTS_SLOT_FREE));
#if DEBUG_ENABLE_IPC_WARNINGS
    processes[group_size] = xp;
#endif
    group_size++;                       /* extra process in group */

    /* Check whether the last process in the chain has a dependency. If it
     * has not, the cycle cannot be closed and we are done.
     */
    if((src_dst_e = P_BLOCKEDON(xp)) == NONE)
      return 0;

    /* Now check if there is a cyclic dependency. For group sizes of two,
     * a combination of SEND(REC) and RECEIVE is not fatal. Larger groups
     * or other combinations indicate a deadlock.
     */
    if (src_dst_e == cp->p_endpoint) {  /* possible deadlock */
      if (group_size == 2) {            /* caller and src_dst */
        /* The trap number is converted to an RTS flag: the IPC call numbers
         * are defined so that (function << 2) coincides with RTS_SENDING for
         * SEND and RTS_RECEIVING for RECEIVE. The XOR below is therefore
         * nonzero in the RTS_SENDING bit exactly when one party sends and
         * the other receives, which is the benign case.
         */
        if ((xp->p_rts_flags ^ (function << 2)) & RTS_SENDING) {
          return(0);                    /* not a deadlock */
        }
      }
#if DEBUG_ENABLE_IPC_WARNINGS
      {
        int i;
        printf("deadlock between these processes:\n");
        for(i = 0; i < group_size; i++) {
          printf(" %10s ", processes[i]->p_name);
        }
        printf("\n\n");
        for(i = 0; i < group_size; i++) {
          print_proc(processes[i]);
          proc_stacktrace(processes[i]);
        }
      }
#endif
      return(group_size);               /* deadlock found */
    }
  }
  return(0);                            /* not a deadlock */
}
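/* Example: if A is blocked SENDing to B while B is blocked SENDing to A,
 * deadlock() reports a group of size 2; a chain of blocking sends
 * A -> B -> C -> A reports size 3. The only group of two that is let through
 * is one where one party sends and the other receives, the normal SENDREC
 * rendezvous.
 */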

/*===========================================================================*
 *                              has_pending                                  *
 *===========================================================================*/
static int has_pending(sys_map_t *map, int src_p, int asynm)
{
  /* Check to see if there is a pending message from the desired source
   * available.
   */

  int src_id;
  sys_id_t id = NULL_PRIV_ID;
#ifdef CONFIG_SMP
  struct proc * p;
#endif

  /* Either check a specific bit in the mask map, or find the first bit set in
   * it (if any), depending on whether the receive was called on a specific
   * source endpoint.
   */
  if (src_p != ANY) {
    src_id = nr_to_id(src_p);
    if (get_sys_bit(*map, src_id)) {
#ifdef CONFIG_SMP
      p = proc_addr(id_to_nr(src_id));
      if (asynm && RTS_ISSET(p, RTS_VMINHIBIT))
        p->p_misc_flags |= MF_SENDA_VM_MISS;
      else
#endif
        id = src_id;
    }
  } else {
    /* Find a source with a pending message */
    for (src_id = 0; src_id < NR_SYS_PROCS; src_id += BITCHUNK_BITS) {
      if (get_sys_bits(*map, src_id) != 0) {
#ifdef CONFIG_SMP
        while (src_id < NR_SYS_PROCS) {
          while (!get_sys_bit(*map, src_id)) {
            if (src_id == NR_SYS_PROCS)
              goto quit_search;
            src_id++;
          }
          p = proc_addr(id_to_nr(src_id));
          /*
           * We must not let the kernel fiddle with pages of a
           * process which are currently being changed by
           * VM. It is dangerous! So do not report such a
           * process as having pending async messages.
           * Skip it.
           */
          if (asynm && RTS_ISSET(p, RTS_VMINHIBIT)) {
            p->p_misc_flags |= MF_SENDA_VM_MISS;
            src_id++;
          } else
            goto quit_search;
        }
#else
        while (!get_sys_bit(*map, src_id)) src_id++;
        goto quit_search;
#endif
      }
    }

quit_search:
    if (src_id < NR_SYS_PROCS)          /* Found one */
      id = src_id;
  }

  return(id);
}
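/* Note that has_pending() returns a privilege id (or NULL_PRIV_ID when
 * nothing is pending), not a process number; callers such as mini_receive()
 * map it back to a process with id_to_nr().
 */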

/*===========================================================================*
 *                              has_pending_notify                           *
 *===========================================================================*/
int has_pending_notify(struct proc * caller, int src_p)
{
  sys_map_t * map = &priv(caller)->s_notify_pending;
  return has_pending(map, src_p, 0);
}

/*===========================================================================*
 *                              has_pending_asend                            *
 *===========================================================================*/
int has_pending_asend(struct proc * caller, int src_p)
{
  sys_map_t * map = &priv(caller)->s_asyn_pending;
  return has_pending(map, src_p, 1);
}

/*===========================================================================*
 *                              unset_notify_pending                         *
 *===========================================================================*/
void unset_notify_pending(struct proc * caller, int src_p)
{
  sys_map_t * map = &priv(caller)->s_notify_pending;
  unset_sys_bit(*map, src_p);
}
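/* Despite the parameter name, the src_p passed to unset_notify_pending() is a
 * privilege id as returned by has_pending_notify() above; see its use in
 * mini_receive() below.
 */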

/*===========================================================================*
 *                              mini_send                                    *
 *===========================================================================*/
int mini_send(
  register struct proc *caller_ptr,     /* who is trying to send a message? */
  endpoint_t dst_e,                     /* to whom is message being sent? */
  message *m_ptr,                       /* pointer to message buffer */
  const int flags
)
{
  /* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
   * for this message, copy the message to it and unblock 'dst'. If 'dst' is
   * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
   */
  register struct proc *dst_ptr;
  register struct proc **xpp;
  int dst_p;
  dst_p = _ENDPOINT_P(dst_e);
  dst_ptr = proc_addr(dst_p);

  if (RTS_ISSET(dst_ptr, RTS_NO_ENDPOINT))
  {
    return EDEADSRCDST;
  }

  /* Check if 'dst' is blocked waiting for this message. The destination's
   * RTS_SENDING flag may be set when its SENDREC call blocked while sending.
   */
  if (WILLRECEIVE(caller_ptr->p_endpoint, dst_ptr, (vir_bytes)m_ptr, NULL)) {
    int call;
    /* Destination is indeed waiting for this message. */
    assert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));

    if (!(flags & FROM_KERNEL)) {
      if(copy_msg_from_user(m_ptr, &dst_ptr->p_delivermsg))
        return EFAULT;
    } else {
      dst_ptr->p_delivermsg = *m_ptr;
      IPC_STATUS_ADD_FLAGS(dst_ptr, IPC_FLG_MSG_FROM_KERNEL);
    }

    dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
    dst_ptr->p_misc_flags |= MF_DELIVERMSG;

    call = (caller_ptr->p_misc_flags & MF_REPLY_PEND ? SENDREC
      : (flags & NON_BLOCKING ? SENDNB : SEND));
    IPC_STATUS_ADD_CALL(dst_ptr, call);

    if (dst_ptr->p_misc_flags & MF_REPLY_PEND)
      dst_ptr->p_misc_flags &= ~MF_REPLY_PEND;

    RTS_UNSET(dst_ptr, RTS_RECEIVING);

#if DEBUG_IPC_HOOK
    hook_ipc_msgsend(&dst_ptr->p_delivermsg, caller_ptr, dst_ptr);
    hook_ipc_msgrecv(&dst_ptr->p_delivermsg, caller_ptr, dst_ptr);
#endif
  } else {
    if(flags & NON_BLOCKING) {
      return(ENOTREADY);
    }

    /* Check for a possible deadlock before actually blocking. */
    if (deadlock(SEND, caller_ptr, dst_e)) {
      return(ELOCKED);
    }

    /* Destination is not waiting. Block and dequeue caller. */
    if (!(flags & FROM_KERNEL)) {
      if(copy_msg_from_user(m_ptr, &caller_ptr->p_sendmsg))
        return EFAULT;
    } else {
      caller_ptr->p_sendmsg = *m_ptr;
      /*
       * we need to remember that this message is from the kernel so we
       * can set the delivery status flags when the message is
       * actually delivered
       */
      caller_ptr->p_misc_flags |= MF_SENDING_FROM_KERNEL;
    }

    RTS_SET(caller_ptr, RTS_SENDING);
    caller_ptr->p_sendto_e = dst_e;

    /* Process is now blocked. Put it on the destination's queue. */
    assert(caller_ptr->p_q_link == NULL);
    xpp = &dst_ptr->p_caller_q;         /* find end of list */
    while (*xpp) xpp = &(*xpp)->p_q_link;
    *xpp = caller_ptr;                  /* add caller to end */

#if DEBUG_IPC_HOOK
    hook_ipc_msgsend(&caller_ptr->p_sendmsg, caller_ptr, dst_ptr);
#endif
  }
  return(OK);
}
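/* So a blocking send has exactly two outcomes: either the destination was
 * already waiting and the message is staged in its p_delivermsg for delivery
 * on the way back to user space, or the caller is marked RTS_SENDING and
 * appended to the destination's p_caller_q until a matching receive arrives.
 */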
963433d6423SLionel Sambuc
964433d6423SLionel Sambuc /*===========================================================================*
965433d6423SLionel Sambuc * mini_receive *
966433d6423SLionel Sambuc *===========================================================================*/
mini_receive(struct proc * caller_ptr,endpoint_t src_e,message * m_buff_usr,const int flags)967433d6423SLionel Sambuc static int mini_receive(struct proc * caller_ptr,
968433d6423SLionel Sambuc endpoint_t src_e, /* which message source is wanted */
969433d6423SLionel Sambuc message * m_buff_usr, /* pointer to message buffer */
970433d6423SLionel Sambuc const int flags)
971433d6423SLionel Sambuc {
972433d6423SLionel Sambuc /* A process or task wants to get a message. If a message is already queued,
973433d6423SLionel Sambuc * acquire it and deblock the sender. If no message from the desired source
974433d6423SLionel Sambuc * is available block the caller.
975433d6423SLionel Sambuc */
976433d6423SLionel Sambuc register struct proc **xpp;
977c8a9900bSCristiano Giuffrida int r, src_id, found, src_proc_nr, src_p;
978c8a9900bSCristiano Giuffrida endpoint_t sender_e;
979433d6423SLionel Sambuc
980433d6423SLionel Sambuc assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
981433d6423SLionel Sambuc
982433d6423SLionel Sambuc /* This is where we want our message. */
983433d6423SLionel Sambuc caller_ptr->p_delivermsg_vir = (vir_bytes) m_buff_usr;
984433d6423SLionel Sambuc
985433d6423SLionel Sambuc if(src_e == ANY) src_p = ANY;
986433d6423SLionel Sambuc else
987433d6423SLionel Sambuc {
988433d6423SLionel Sambuc okendpt(src_e, &src_p);
989433d6423SLionel Sambuc if (RTS_ISSET(proc_addr(src_p), RTS_NO_ENDPOINT))
990433d6423SLionel Sambuc {
991433d6423SLionel Sambuc return EDEADSRCDST;
992433d6423SLionel Sambuc }
993433d6423SLionel Sambuc }
994433d6423SLionel Sambuc
995433d6423SLionel Sambuc
996433d6423SLionel Sambuc /* Check to see if a message from desired source is already available. The
997433d6423SLionel Sambuc * caller's RTS_SENDING flag may be set if SENDREC couldn't send. If it is
998433d6423SLionel Sambuc * set, the process should be blocked.
999433d6423SLionel Sambuc */
1000433d6423SLionel Sambuc if (!RTS_ISSET(caller_ptr, RTS_SENDING)) {
1001433d6423SLionel Sambuc
1002433d6423SLionel Sambuc /* Check if there are pending notifications, except for SENDREC. */
1003433d6423SLionel Sambuc if (! (caller_ptr->p_misc_flags & MF_REPLY_PEND)) {
1004433d6423SLionel Sambuc
1005433d6423SLionel Sambuc /* Check for pending notifications */
1006c8a9900bSCristiano Giuffrida src_id = has_pending_notify(caller_ptr, src_p);
1007c8a9900bSCristiano Giuffrida found = src_id != NULL_PRIV_ID;
1008c8a9900bSCristiano Giuffrida if(found) {
1009433d6423SLionel Sambuc src_proc_nr = id_to_nr(src_id); /* get source proc */
1010c8a9900bSCristiano Giuffrida sender_e = proc_addr(src_proc_nr)->p_endpoint;
1011c8a9900bSCristiano Giuffrida }
1012c8a9900bSCristiano Giuffrida
1013c8a9900bSCristiano Giuffrida if (found && CANRECEIVE(src_e, sender_e, caller_ptr, 0,
1014c8a9900bSCristiano Giuffrida &m_notify_buff)) {
1015c8a9900bSCristiano Giuffrida
1016433d6423SLionel Sambuc #if DEBUG_ENABLE_IPC_WARNINGS
1017433d6423SLionel Sambuc if(src_proc_nr == NONE) {
1018433d6423SLionel Sambuc printf("mini_receive: sending notify from NONE\n");
1019433d6423SLionel Sambuc }
1020433d6423SLionel Sambuc #endif
1021433d6423SLionel Sambuc assert(src_proc_nr != NONE);
1022433d6423SLionel Sambuc unset_notify_pending(caller_ptr, src_id); /* no longer pending */
1023433d6423SLionel Sambuc
1024433d6423SLionel Sambuc /* Found a suitable source, deliver the notification message. */
1025433d6423SLionel Sambuc assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
1026c8a9900bSCristiano Giuffrida assert(src_e == ANY || sender_e == src_e);
1027433d6423SLionel Sambuc
1028433d6423SLionel Sambuc /* assemble message */
1029433d6423SLionel Sambuc BuildNotifyMessage(&caller_ptr->p_delivermsg, src_proc_nr, caller_ptr);
1030c8a9900bSCristiano Giuffrida caller_ptr->p_delivermsg.m_source = sender_e;
1031433d6423SLionel Sambuc caller_ptr->p_misc_flags |= MF_DELIVERMSG;
1032433d6423SLionel Sambuc
1033433d6423SLionel Sambuc IPC_STATUS_ADD_CALL(caller_ptr, NOTIFY);
1034433d6423SLionel Sambuc
1035433d6423SLionel Sambuc goto receive_done;
1036433d6423SLionel Sambuc }
1037433d6423SLionel Sambuc }
1038433d6423SLionel Sambuc
1039433d6423SLionel Sambuc /* Check for pending asynchronous messages */
1040433d6423SLionel Sambuc if (has_pending_asend(caller_ptr, src_p) != NULL_PRIV_ID) {
1041433d6423SLionel Sambuc if (src_p != ANY)
1042c8a9900bSCristiano Giuffrida r = try_one(src_e, proc_addr(src_p), caller_ptr);
1043433d6423SLionel Sambuc else
1044433d6423SLionel Sambuc r = try_async(caller_ptr);
1045433d6423SLionel Sambuc
1046433d6423SLionel Sambuc if (r == OK) {
1047433d6423SLionel Sambuc IPC_STATUS_ADD_CALL(caller_ptr, SENDA);
1048433d6423SLionel Sambuc goto receive_done;
1049433d6423SLionel Sambuc }
1050433d6423SLionel Sambuc }
1051433d6423SLionel Sambuc
1052433d6423SLionel Sambuc 	/* Check caller queue. Use pointer pointers to keep code simple; see the sketch after this function. */
1053433d6423SLionel Sambuc xpp = &caller_ptr->p_caller_q;
1054433d6423SLionel Sambuc while (*xpp) {
1055433d6423SLionel Sambuc struct proc * sender = *xpp;
1056c8a9900bSCristiano Giuffrida endpoint_t sender_e = sender->p_endpoint;
1057433d6423SLionel Sambuc
1058c8a9900bSCristiano Giuffrida if (CANRECEIVE(src_e, sender_e, caller_ptr, 0, &sender->p_sendmsg)) {
1059433d6423SLionel Sambuc int call;
1060433d6423SLionel Sambuc assert(!RTS_ISSET(sender, RTS_SLOT_FREE));
1061433d6423SLionel Sambuc assert(!RTS_ISSET(sender, RTS_NO_ENDPOINT));
1062433d6423SLionel Sambuc
1063433d6423SLionel Sambuc /* Found acceptable message. Copy it and update status. */
1064433d6423SLionel Sambuc assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
1065433d6423SLionel Sambuc caller_ptr->p_delivermsg = sender->p_sendmsg;
1066433d6423SLionel Sambuc caller_ptr->p_delivermsg.m_source = sender->p_endpoint;
1067433d6423SLionel Sambuc caller_ptr->p_misc_flags |= MF_DELIVERMSG;
1068433d6423SLionel Sambuc RTS_UNSET(sender, RTS_SENDING);
1069433d6423SLionel Sambuc
1070433d6423SLionel Sambuc call = (sender->p_misc_flags & MF_REPLY_PEND ? SENDREC : SEND);
1071433d6423SLionel Sambuc IPC_STATUS_ADD_CALL(caller_ptr, call);
1072433d6423SLionel Sambuc
1073433d6423SLionel Sambuc /*
1074433d6423SLionel Sambuc * if the message is originally from the kernel on behalf of this
1075433d6423SLionel Sambuc 	 * process, we must set the IPC status flags accordingly
1076433d6423SLionel Sambuc */
1077433d6423SLionel Sambuc if (sender->p_misc_flags & MF_SENDING_FROM_KERNEL) {
1078433d6423SLionel Sambuc IPC_STATUS_ADD_FLAGS(caller_ptr, IPC_FLG_MSG_FROM_KERNEL);
1079433d6423SLionel Sambuc 		/* we can clear the flag now, it is not needed anymore */
1080433d6423SLionel Sambuc sender->p_misc_flags &= ~MF_SENDING_FROM_KERNEL;
1081433d6423SLionel Sambuc }
1082433d6423SLionel Sambuc if (sender->p_misc_flags & MF_SIG_DELAY)
1083433d6423SLionel Sambuc sig_delay_done(sender);
1084433d6423SLionel Sambuc
1085433d6423SLionel Sambuc #if DEBUG_IPC_HOOK
1086433d6423SLionel Sambuc hook_ipc_msgrecv(&caller_ptr->p_delivermsg, *xpp, caller_ptr);
1087433d6423SLionel Sambuc #endif
1088433d6423SLionel Sambuc
1089433d6423SLionel Sambuc *xpp = sender->p_q_link; /* remove from queue */
1090433d6423SLionel Sambuc sender->p_q_link = NULL;
1091433d6423SLionel Sambuc goto receive_done;
1092433d6423SLionel Sambuc }
1093433d6423SLionel Sambuc xpp = &sender->p_q_link; /* proceed to next */
1094433d6423SLionel Sambuc }
1095433d6423SLionel Sambuc }
1096433d6423SLionel Sambuc
1097433d6423SLionel Sambuc /* No suitable message is available or the caller couldn't send in SENDREC.
1098433d6423SLionel Sambuc * Block the process trying to receive, unless the flags tell otherwise.
1099433d6423SLionel Sambuc */
1100433d6423SLionel Sambuc if ( ! (flags & NON_BLOCKING)) {
1101433d6423SLionel Sambuc /* Check for a possible deadlock before actually blocking. */
1102433d6423SLionel Sambuc if (deadlock(RECEIVE, caller_ptr, src_e)) {
1103433d6423SLionel Sambuc return(ELOCKED);
1104433d6423SLionel Sambuc }
1105433d6423SLionel Sambuc
1106433d6423SLionel Sambuc caller_ptr->p_getfrom_e = src_e;
1107433d6423SLionel Sambuc RTS_SET(caller_ptr, RTS_RECEIVING);
1108433d6423SLionel Sambuc return(OK);
1109433d6423SLionel Sambuc } else {
1110433d6423SLionel Sambuc return(ENOTREADY);
1111433d6423SLionel Sambuc }
1112433d6423SLionel Sambuc
1113433d6423SLionel Sambuc receive_done:
1114433d6423SLionel Sambuc if (caller_ptr->p_misc_flags & MF_REPLY_PEND)
1115433d6423SLionel Sambuc caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
1116433d6423SLionel Sambuc return OK;
1117433d6423SLionel Sambuc }
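/*
 * Illustrative sketch, not kernel logic: the caller-queue scan in
 * mini_receive above unlinks a sender mid-list through a pointer pointer,
 * so the head of the queue needs no special case. With a hypothetical
 * node type that has a 'next' field, removing 'victim' looks like:
 *
 *	node_t **xpp = &queue;			// start at the head pointer
 *	while (*xpp != NULL) {
 *		if (*xpp == victim) {
 *			*xpp = victim->next;	// unlink, whether head or not
 *			victim->next = NULL;
 *			break;
 *		}
 *		xpp = &(*xpp)->next;		// advance to the next 'next' pointer
 *	}
 */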
1118433d6423SLionel Sambuc
1119433d6423SLionel Sambuc /*===========================================================================*
1120433d6423SLionel Sambuc * mini_notify *
1121433d6423SLionel Sambuc *===========================================================================*/
1122433d6423SLionel Sambuc int mini_notify(
1123433d6423SLionel Sambuc const struct proc *caller_ptr, /* sender of the notification */
1124433d6423SLionel Sambuc endpoint_t dst_e /* which process to notify */
1125433d6423SLionel Sambuc )
1126433d6423SLionel Sambuc {
1127433d6423SLionel Sambuc register struct proc *dst_ptr;
1128433d6423SLionel Sambuc int src_id; /* source id for late delivery */
1129433d6423SLionel Sambuc int dst_p;
1130433d6423SLionel Sambuc
1131433d6423SLionel Sambuc if (!isokendpt(dst_e, &dst_p)) {
1132433d6423SLionel Sambuc util_stacktrace();
1133433d6423SLionel Sambuc printf("mini_notify: bogus endpoint %d\n", dst_e);
1134433d6423SLionel Sambuc return EDEADSRCDST;
1135433d6423SLionel Sambuc }
1136433d6423SLionel Sambuc
1137433d6423SLionel Sambuc dst_ptr = proc_addr(dst_p);
1138433d6423SLionel Sambuc
1139433d6423SLionel Sambuc /* Check to see if target is blocked waiting for this message. A process
1140433d6423SLionel Sambuc * can be both sending and receiving during a SENDREC system call.
1141433d6423SLionel Sambuc */
1142c8a9900bSCristiano Giuffrida if (WILLRECEIVE(caller_ptr->p_endpoint, dst_ptr, 0, &m_notify_buff) &&
1143433d6423SLionel Sambuc !(dst_ptr->p_misc_flags & MF_REPLY_PEND)) {
1144433d6423SLionel Sambuc /* Destination is indeed waiting for a message. Assemble a notification
1145433d6423SLionel Sambuc * message and deliver it. Copy from pseudo-source HARDWARE, since the
1146433d6423SLionel Sambuc * message is in the kernel's address space.
1147433d6423SLionel Sambuc */
1148433d6423SLionel Sambuc assert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
1149433d6423SLionel Sambuc
1150433d6423SLionel Sambuc BuildNotifyMessage(&dst_ptr->p_delivermsg, proc_nr(caller_ptr), dst_ptr);
1151433d6423SLionel Sambuc dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
1152433d6423SLionel Sambuc dst_ptr->p_misc_flags |= MF_DELIVERMSG;
1153433d6423SLionel Sambuc
1154433d6423SLionel Sambuc IPC_STATUS_ADD_CALL(dst_ptr, NOTIFY);
1155433d6423SLionel Sambuc RTS_UNSET(dst_ptr, RTS_RECEIVING);
1156433d6423SLionel Sambuc
1157433d6423SLionel Sambuc return(OK);
1158433d6423SLionel Sambuc }
1159433d6423SLionel Sambuc
1160433d6423SLionel Sambuc /* Destination is not ready to receive the notification. Add it to the
1161433d6423SLionel Sambuc * bit map with pending notifications. Note the indirectness: the privilege id
1162433d6423SLionel Sambuc * instead of the process number is used in the pending bit map.
1163433d6423SLionel Sambuc */
1164433d6423SLionel Sambuc src_id = priv(caller_ptr)->s_id;
1165433d6423SLionel Sambuc set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id);
1166433d6423SLionel Sambuc return(OK);
1167433d6423SLionel Sambuc }
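/*
 * Illustrative note: the pending bit set above is keyed by the sender's
 * privilege id rather than its process number, and a later receive recovers
 * the sender along the lines of the notification path in mini_receive:
 *
 *	src_id = has_pending_notify(dst_ptr, ANY);	// scan s_notify_pending
 *	if (src_id != NULL_PRIV_ID)
 *		src_proc_nr = id_to_nr(src_id);		// map priv id back to a slot
 *
 * This only sketches the lookup direction; the real checks (CANRECEIVE,
 * MF_REPLY_PEND) live in mini_receive itself.
 */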
1168433d6423SLionel Sambuc
1169433d6423SLionel Sambuc #define ASCOMPLAIN(caller, entry, field) \
1170433d6423SLionel Sambuc printf("kernel:%s:%d: asyn failed for %s in %s " \
11713c8950ccSBen Gras "(%d/%zu, tab 0x%lx)\n",__FILE__,__LINE__, \
1172433d6423SLionel Sambuc field, caller->p_name, entry, priv(caller)->s_asynsize, priv(caller)->s_asyntab)
1173433d6423SLionel Sambuc
1174433d6423SLionel Sambuc #define A_RETR(entry) do { \
1175433d6423SLionel Sambuc if (data_copy( \
1176433d6423SLionel Sambuc caller_ptr->p_endpoint, table_v + (entry)*sizeof(asynmsg_t),\
1177433d6423SLionel Sambuc KERNEL, (vir_bytes) &tabent, \
1178433d6423SLionel Sambuc sizeof(tabent)) != OK) { \
1179433d6423SLionel Sambuc ASCOMPLAIN(caller_ptr, entry, "message entry"); \
1180433d6423SLionel Sambuc r = EFAULT; \
1181433d6423SLionel Sambuc goto asyn_error; \
1182433d6423SLionel Sambuc } \
1183c8a9900bSCristiano Giuffrida else if(tabent.dst == SELF) { \
1184c8a9900bSCristiano Giuffrida tabent.dst = caller_ptr->p_endpoint; \
1185c8a9900bSCristiano Giuffrida } \
1186433d6423SLionel Sambuc } while(0)
1187433d6423SLionel Sambuc
1188433d6423SLionel Sambuc #define A_INSRT(entry) do { \
1189433d6423SLionel Sambuc if (data_copy(KERNEL, (vir_bytes) &tabent, \
1190433d6423SLionel Sambuc caller_ptr->p_endpoint, table_v + (entry)*sizeof(asynmsg_t),\
1191433d6423SLionel Sambuc sizeof(tabent)) != OK) { \
1192433d6423SLionel Sambuc ASCOMPLAIN(caller_ptr, entry, "message entry"); \
11933091b8cfSDavid van Moolenbroek /* Do NOT set r or goto asyn_error here! */ \
1194433d6423SLionel Sambuc } \
1195433d6423SLionel Sambuc } while(0)
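/*
 * Both helper macros rely on context supplied by their callers: a local
 * 'caller_ptr', 'table_v' and 'tabent', and, for A_RETR, a local 'r' plus an
 * 'asyn_error' label to jump to when the copy fails. A_INSRT deliberately
 * neither sets 'r' nor jumps, because it may run after delivery of the
 * message has already begun (see the note in try_one below).
 */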
1196433d6423SLionel Sambuc
1197433d6423SLionel Sambuc /*===========================================================================*
1198433d6423SLionel Sambuc * try_deliver_senda *
1199433d6423SLionel Sambuc *===========================================================================*/
1200433d6423SLionel Sambuc int try_deliver_senda(struct proc *caller_ptr,
1201433d6423SLionel Sambuc asynmsg_t *table,
1202433d6423SLionel Sambuc size_t size)
1203433d6423SLionel Sambuc {
1204433d6423SLionel Sambuc int r, dst_p, done, do_notify;
1205433d6423SLionel Sambuc unsigned int i;
1206433d6423SLionel Sambuc unsigned flags;
1207433d6423SLionel Sambuc endpoint_t dst;
1208433d6423SLionel Sambuc struct proc *dst_ptr;
1209433d6423SLionel Sambuc struct priv *privp;
1210433d6423SLionel Sambuc asynmsg_t tabent;
1211433d6423SLionel Sambuc const vir_bytes table_v = (vir_bytes) table;
1212c8a9900bSCristiano Giuffrida message *m_ptr = NULL;
1213433d6423SLionel Sambuc
1214433d6423SLionel Sambuc privp = priv(caller_ptr);
1215433d6423SLionel Sambuc
1216433d6423SLionel Sambuc /* Clear table */
1217433d6423SLionel Sambuc privp->s_asyntab = -1;
1218433d6423SLionel Sambuc privp->s_asynsize = 0;
1219062400c0SCristiano Giuffrida privp->s_asynendpoint = caller_ptr->p_endpoint;
1220433d6423SLionel Sambuc
1221433d6423SLionel Sambuc if (size == 0) return(OK); /* Nothing to do, just return */
1222433d6423SLionel Sambuc
1223433d6423SLionel Sambuc /* Scan the table */
1224433d6423SLionel Sambuc do_notify = FALSE;
1225433d6423SLionel Sambuc done = TRUE;
1226433d6423SLionel Sambuc
1227433d6423SLionel Sambuc /* Limit size to something reasonable. An arbitrary choice is 16
1228433d6423SLionel Sambuc * times the number of process table entries.
1229433d6423SLionel Sambuc *
1230433d6423SLionel Sambuc * (this check has been duplicated in sys_call but is left here
1231433d6423SLionel Sambuc * as a sanity check)
1232433d6423SLionel Sambuc */
1233433d6423SLionel Sambuc if (size > 16*(NR_TASKS + NR_PROCS)) {
1234433d6423SLionel Sambuc r = EDOM;
1235433d6423SLionel Sambuc return r;
1236433d6423SLionel Sambuc }
1237433d6423SLionel Sambuc
1238433d6423SLionel Sambuc for (i = 0; i < size; i++) {
1239433d6423SLionel Sambuc /* Process each entry in the table and store the result in the table.
1240433d6423SLionel Sambuc * If we're done handling a message, copy the result to the sender. */
1241433d6423SLionel Sambuc
1242433d6423SLionel Sambuc dst = NONE;
1243433d6423SLionel Sambuc /* Copy message to kernel */
1244433d6423SLionel Sambuc A_RETR(i);
1245433d6423SLionel Sambuc flags = tabent.flags;
1246433d6423SLionel Sambuc dst = tabent.dst;
1247433d6423SLionel Sambuc
1248433d6423SLionel Sambuc if (flags == 0) continue; /* Skip empty entries */
1249433d6423SLionel Sambuc
1250433d6423SLionel Sambuc /* 'flags' field must contain only valid bits */
1251433d6423SLionel Sambuc if(flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY|AMF_NOTIFY_ERR)) {
1252433d6423SLionel Sambuc r = EINVAL;
1253433d6423SLionel Sambuc goto asyn_error;
1254433d6423SLionel Sambuc }
1255433d6423SLionel Sambuc if (!(flags & AMF_VALID)) { /* Must contain message */
1256433d6423SLionel Sambuc r = EINVAL;
1257433d6423SLionel Sambuc goto asyn_error;
1258433d6423SLionel Sambuc }
1259433d6423SLionel Sambuc if (flags & AMF_DONE) continue; /* Already done processing */
1260433d6423SLionel Sambuc
1261433d6423SLionel Sambuc r = OK;
1262433d6423SLionel Sambuc if (!isokendpt(tabent.dst, &dst_p))
1263433d6423SLionel Sambuc r = EDEADSRCDST; /* Bad destination, report the error */
1264433d6423SLionel Sambuc else if (iskerneln(dst_p))
1265433d6423SLionel Sambuc r = ECALLDENIED; /* Asyn sends to the kernel are not allowed */
1266c8a9900bSCristiano Giuffrida else if (!may_asynsend_to(caller_ptr, dst_p))
1267433d6423SLionel Sambuc r = ECALLDENIED; /* Send denied by IPC mask */
1268433d6423SLionel Sambuc else /* r == OK */
1269433d6423SLionel Sambuc dst_ptr = proc_addr(dst_p);
1270433d6423SLionel Sambuc
1271433d6423SLionel Sambuc /* XXX: RTS_NO_ENDPOINT should be removed */
1272433d6423SLionel Sambuc if (r == OK && RTS_ISSET(dst_ptr, RTS_NO_ENDPOINT)) {
1273433d6423SLionel Sambuc r = EDEADSRCDST;
1274433d6423SLionel Sambuc }
1275433d6423SLionel Sambuc
1276433d6423SLionel Sambuc /* Check if 'dst' is blocked waiting for this message.
1277433d6423SLionel Sambuc * If AMF_NOREPLY is set, do not satisfy the receiving part of
1278433d6423SLionel Sambuc * a SENDREC.
1279433d6423SLionel Sambuc */
1280c8a9900bSCristiano Giuffrida if (r == OK && WILLRECEIVE(caller_ptr->p_endpoint, dst_ptr,
1281c8a9900bSCristiano Giuffrida (vir_bytes)&table[i].msg, NULL) &&
1282433d6423SLionel Sambuc (!(flags&AMF_NOREPLY) || !(dst_ptr->p_misc_flags&MF_REPLY_PEND))) {
1283433d6423SLionel Sambuc /* Destination is indeed waiting for this message. */
1284433d6423SLionel Sambuc dst_ptr->p_delivermsg = tabent.msg;
1285433d6423SLionel Sambuc dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
1286433d6423SLionel Sambuc dst_ptr->p_misc_flags |= MF_DELIVERMSG;
1287433d6423SLionel Sambuc IPC_STATUS_ADD_CALL(dst_ptr, SENDA);
1288433d6423SLionel Sambuc RTS_UNSET(dst_ptr, RTS_RECEIVING);
1289433d6423SLionel Sambuc #if DEBUG_IPC_HOOK
1290433d6423SLionel Sambuc hook_ipc_msgrecv(&dst_ptr->p_delivermsg, caller_ptr, dst_ptr);
1291433d6423SLionel Sambuc #endif
1292433d6423SLionel Sambuc } else if (r == OK) {
1293433d6423SLionel Sambuc /* Inform receiver that something is pending */
1294433d6423SLionel Sambuc set_sys_bit(priv(dst_ptr)->s_asyn_pending,
1295433d6423SLionel Sambuc priv(caller_ptr)->s_id);
1296433d6423SLionel Sambuc done = FALSE;
1297433d6423SLionel Sambuc continue;
1298433d6423SLionel Sambuc }
1299433d6423SLionel Sambuc
1300433d6423SLionel Sambuc /* Store results */
1301433d6423SLionel Sambuc tabent.result = r;
1302433d6423SLionel Sambuc tabent.flags = flags | AMF_DONE;
1303433d6423SLionel Sambuc if (flags & AMF_NOTIFY)
1304433d6423SLionel Sambuc do_notify = TRUE;
1305433d6423SLionel Sambuc else if (r != OK && (flags & AMF_NOTIFY_ERR))
1306433d6423SLionel Sambuc do_notify = TRUE;
13073091b8cfSDavid van Moolenbroek A_INSRT(i); /* Copy results to caller; ignore errors */
1308433d6423SLionel Sambuc continue;
1309433d6423SLionel Sambuc
1310433d6423SLionel Sambuc asyn_error:
1311433d6423SLionel Sambuc if (dst != NONE)
1312433d6423SLionel Sambuc printf("KERNEL senda error %d to %d\n", r, dst);
1313433d6423SLionel Sambuc else
1314433d6423SLionel Sambuc printf("KERNEL senda error %d\n", r);
1315433d6423SLionel Sambuc }
1316433d6423SLionel Sambuc
1317433d6423SLionel Sambuc if (do_notify)
1318433d6423SLionel Sambuc mini_notify(proc_addr(ASYNCM), caller_ptr->p_endpoint);
1319433d6423SLionel Sambuc
1320433d6423SLionel Sambuc if (!done) {
1321433d6423SLionel Sambuc privp->s_asyntab = (vir_bytes) table;
1322433d6423SLionel Sambuc privp->s_asynsize = size;
1323433d6423SLionel Sambuc }
1324433d6423SLionel Sambuc
1325433d6423SLionel Sambuc return(OK);
1326433d6423SLionel Sambuc }
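/*
 * Hypothetical sender-side sketch, not taken from this file: a system
 * process using the asynchronous path fills a table of asynmsg_t entries and
 * hands it to the kernel. The field names are the ones read back with A_RETR
 * above; the senda() wrapper name is an assumption about the user-space
 * library, not something defined here.
 *
 *	asynmsg_t tab[2];
 *	memset(tab, 0, sizeof(tab));
 *	tab[0].dst = some_driver_e;		// hypothetical destination endpoint
 *	tab[0].msg = m0;			// hypothetical message payload
 *	tab[0].flags = AMF_VALID | AMF_NOTIFY;	// valid entry, notify when done
 *	tab[1].flags = 0;			// empty slot, skipped by the scan
 *	senda(tab, 2);				// assumed library wrapper
 *
 * Entries marked AMF_DONE are skipped on later scans, and 'result' is
 * written back per entry once delivery succeeds or fails.
 */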
1327433d6423SLionel Sambuc
1328433d6423SLionel Sambuc /*===========================================================================*
1329433d6423SLionel Sambuc * mini_senda *
1330433d6423SLionel Sambuc *===========================================================================*/
1331433d6423SLionel Sambuc static int mini_senda(struct proc *caller_ptr, asynmsg_t *table, size_t size)
1332433d6423SLionel Sambuc {
1333433d6423SLionel Sambuc struct priv *privp;
1334433d6423SLionel Sambuc
1335433d6423SLionel Sambuc privp = priv(caller_ptr);
1336433d6423SLionel Sambuc if (!(privp->s_flags & SYS_PROC)) {
1337433d6423SLionel Sambuc printf( "mini_senda: warning caller has no privilege structure\n");
1338433d6423SLionel Sambuc return(EPERM);
1339433d6423SLionel Sambuc }
1340433d6423SLionel Sambuc
1341433d6423SLionel Sambuc return try_deliver_senda(caller_ptr, table, size);
1342433d6423SLionel Sambuc }
1343433d6423SLionel Sambuc
1344433d6423SLionel Sambuc
1345433d6423SLionel Sambuc /*===========================================================================*
1346433d6423SLionel Sambuc * try_async *
1347433d6423SLionel Sambuc *===========================================================================*/
13486077d1adSDr. Florian Grätz static int try_async(struct proc * caller_ptr)
1349433d6423SLionel Sambuc {
1350433d6423SLionel Sambuc int r;
1351433d6423SLionel Sambuc struct priv *privp;
1352433d6423SLionel Sambuc struct proc *src_ptr;
1353433d6423SLionel Sambuc sys_map_t *map;
1354433d6423SLionel Sambuc
1355433d6423SLionel Sambuc map = &priv(caller_ptr)->s_asyn_pending;
1356433d6423SLionel Sambuc
1357433d6423SLionel Sambuc /* Try all privilege structures */
1358433d6423SLionel Sambuc for (privp = BEG_PRIV_ADDR; privp < END_PRIV_ADDR; ++privp) {
1359433d6423SLionel Sambuc if (privp->s_proc_nr == NONE)
1360433d6423SLionel Sambuc continue;
1361433d6423SLionel Sambuc
1362433d6423SLionel Sambuc if (!get_sys_bit(*map, privp->s_id))
1363433d6423SLionel Sambuc continue;
1364433d6423SLionel Sambuc
1365433d6423SLionel Sambuc src_ptr = proc_addr(privp->s_proc_nr);
1366433d6423SLionel Sambuc
1367433d6423SLionel Sambuc #ifdef CONFIG_SMP
1368433d6423SLionel Sambuc /*
1369433d6423SLionel Sambuc * Do not copy from a process which does not have a stable address space
1370433d6423SLionel Sambuc * due to VM fiddling with it
1371433d6423SLionel Sambuc */
1372433d6423SLionel Sambuc if (RTS_ISSET(src_ptr, RTS_VMINHIBIT)) {
1373433d6423SLionel Sambuc src_ptr->p_misc_flags |= MF_SENDA_VM_MISS;
1374433d6423SLionel Sambuc continue;
1375433d6423SLionel Sambuc }
1376433d6423SLionel Sambuc #endif
1377433d6423SLionel Sambuc
1378433d6423SLionel Sambuc assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
1379c8a9900bSCristiano Giuffrida if ((r = try_one(ANY, src_ptr, caller_ptr)) == OK)
1380433d6423SLionel Sambuc return(r);
1381433d6423SLionel Sambuc }
1382433d6423SLionel Sambuc
1383433d6423SLionel Sambuc return(ESRCH);
1384433d6423SLionel Sambuc }
1385433d6423SLionel Sambuc
1386433d6423SLionel Sambuc
1387433d6423SLionel Sambuc /*===========================================================================*
1388433d6423SLionel Sambuc * try_one *
1389433d6423SLionel Sambuc *===========================================================================*/
1390c8a9900bSCristiano Giuffrida static int try_one(endpoint_t receive_e, struct proc *src_ptr,
1391c8a9900bSCristiano Giuffrida struct proc *dst_ptr)
1392433d6423SLionel Sambuc {
1393433d6423SLionel Sambuc /* Try to receive an asynchronous message from 'src_ptr' */
1394433d6423SLionel Sambuc int r = EAGAIN, done, do_notify;
1395433d6423SLionel Sambuc unsigned int flags, i;
1396433d6423SLionel Sambuc size_t size;
1397c8a9900bSCristiano Giuffrida endpoint_t dst, src_e;
1398433d6423SLionel Sambuc struct proc *caller_ptr;
1399433d6423SLionel Sambuc struct priv *privp;
1400433d6423SLionel Sambuc asynmsg_t tabent;
1401433d6423SLionel Sambuc vir_bytes table_v;
1402433d6423SLionel Sambuc
1403433d6423SLionel Sambuc privp = priv(src_ptr);
1404433d6423SLionel Sambuc if (!(privp->s_flags & SYS_PROC)) return(EPERM);
1405433d6423SLionel Sambuc size = privp->s_asynsize;
1406433d6423SLionel Sambuc table_v = privp->s_asyntab;
1407433d6423SLionel Sambuc
1408433d6423SLionel Sambuc /* Clear table pending message flag. We're done unless we're not. */
1409433d6423SLionel Sambuc unset_sys_bit(priv(dst_ptr)->s_asyn_pending, privp->s_id);
1410433d6423SLionel Sambuc
1411433d6423SLionel Sambuc if (size == 0) return(EAGAIN);
1412062400c0SCristiano Giuffrida if (privp->s_asynendpoint != src_ptr->p_endpoint) return EAGAIN;
1413c8a9900bSCristiano Giuffrida if (!may_asynsend_to(src_ptr, proc_nr(dst_ptr))) return (ECALLDENIED);
1414433d6423SLionel Sambuc
1415433d6423SLionel Sambuc caller_ptr = src_ptr; /* Needed for A_ macros later on */
1416c8a9900bSCristiano Giuffrida src_e = src_ptr->p_endpoint;
1417433d6423SLionel Sambuc
1418433d6423SLionel Sambuc /* Scan the table */
1419433d6423SLionel Sambuc do_notify = FALSE;
1420433d6423SLionel Sambuc done = TRUE;
1421433d6423SLionel Sambuc
1422433d6423SLionel Sambuc for (i = 0; i < size; i++) {
1423433d6423SLionel Sambuc /* Process each entry in the table and store the result in the table.
1424433d6423SLionel Sambuc * If we're done handling a message, copy the result to the sender.
1425433d6423SLionel Sambuc * Some checks done in mini_senda are duplicated here, as the sender
1426433d6423SLionel Sambuc * could've altered the contents of the table in the meantime.
1427433d6423SLionel Sambuc */
1428433d6423SLionel Sambuc
1429433d6423SLionel Sambuc /* Copy message to kernel */
1430433d6423SLionel Sambuc A_RETR(i);
1431433d6423SLionel Sambuc flags = tabent.flags;
1432433d6423SLionel Sambuc dst = tabent.dst;
1433433d6423SLionel Sambuc
1434433d6423SLionel Sambuc if (flags == 0) continue; /* Skip empty entries */
1435433d6423SLionel Sambuc
1436433d6423SLionel Sambuc /* 'flags' field must contain only valid bits */
1437433d6423SLionel Sambuc if(flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY|AMF_NOTIFY_ERR))
1438433d6423SLionel Sambuc r = EINVAL;
1439433d6423SLionel Sambuc else if (!(flags & AMF_VALID)) /* Must contain message */
1440433d6423SLionel Sambuc r = EINVAL;
1441433d6423SLionel Sambuc else if (flags & AMF_DONE) continue; /* Already done processing */
1442433d6423SLionel Sambuc
1443433d6423SLionel Sambuc /* Clear done flag. The sender is done sending when all messages in the
1444433d6423SLionel Sambuc 	 * table are marked done or empty. However, we will only know that the
1445433d6423SLionel Sambuc 	 * next time we enter this function, or when the sender sends
1446433d6423SLionel Sambuc 	 * additional asynchronous messages and manages to deliver them
1447433d6423SLionel Sambuc 	 * all.
1448433d6423SLionel Sambuc */
1449433d6423SLionel Sambuc done = FALSE;
1450433d6423SLionel Sambuc
1451433d6423SLionel Sambuc if (r == EINVAL)
1452433d6423SLionel Sambuc goto store_result;
1453433d6423SLionel Sambuc
1454433d6423SLionel Sambuc /* Message must be directed at receiving end */
1455433d6423SLionel Sambuc if (dst != dst_ptr->p_endpoint) continue;
1456433d6423SLionel Sambuc
1457c8a9900bSCristiano Giuffrida if (!CANRECEIVE(receive_e, src_e, dst_ptr,
1458c8a9900bSCristiano Giuffrida table_v + i*sizeof(asynmsg_t) + offsetof(struct asynmsg,msg),
1459c8a9900bSCristiano Giuffrida NULL)) {
1460c8a9900bSCristiano Giuffrida continue;
1461c8a9900bSCristiano Giuffrida }
1462c8a9900bSCristiano Giuffrida
1463433d6423SLionel Sambuc /* If AMF_NOREPLY is set, then this message is not a reply to a
1464433d6423SLionel Sambuc * SENDREC and thus should not satisfy the receiving part of the
1465433d6423SLionel Sambuc * SENDREC. This message is to be delivered later.
1466433d6423SLionel Sambuc */
1467433d6423SLionel Sambuc if ((flags & AMF_NOREPLY) && (dst_ptr->p_misc_flags & MF_REPLY_PEND))
1468433d6423SLionel Sambuc continue;
1469433d6423SLionel Sambuc
1470433d6423SLionel Sambuc /* Destination is ready to receive the message; deliver it */
1471433d6423SLionel Sambuc r = OK;
1472433d6423SLionel Sambuc dst_ptr->p_delivermsg = tabent.msg;
1473433d6423SLionel Sambuc dst_ptr->p_delivermsg.m_source = src_ptr->p_endpoint;
1474433d6423SLionel Sambuc dst_ptr->p_misc_flags |= MF_DELIVERMSG;
1475433d6423SLionel Sambuc #if DEBUG_IPC_HOOK
1476433d6423SLionel Sambuc hook_ipc_msgrecv(&dst_ptr->p_delivermsg, src_ptr, dst_ptr);
1477433d6423SLionel Sambuc #endif
1478433d6423SLionel Sambuc
1479433d6423SLionel Sambuc store_result:
14803091b8cfSDavid van Moolenbroek /* Store results for sender. We may just have started delivering a
14813091b8cfSDavid van Moolenbroek * message, so we must not return an error to the caller in the case
14823091b8cfSDavid van Moolenbroek * that storing the results triggers an error!
14833091b8cfSDavid van Moolenbroek */
1484433d6423SLionel Sambuc tabent.result = r;
1485433d6423SLionel Sambuc tabent.flags = flags | AMF_DONE;
1486433d6423SLionel Sambuc if (flags & AMF_NOTIFY) do_notify = TRUE;
1487433d6423SLionel Sambuc else if (r != OK && (flags & AMF_NOTIFY_ERR)) do_notify = TRUE;
14883091b8cfSDavid van Moolenbroek A_INSRT(i); /* Copy results to sender; ignore errors */
1489433d6423SLionel Sambuc
1490433d6423SLionel Sambuc break;
1491433d6423SLionel Sambuc }
1492433d6423SLionel Sambuc
1493433d6423SLionel Sambuc if (do_notify)
1494433d6423SLionel Sambuc mini_notify(proc_addr(ASYNCM), src_ptr->p_endpoint);
1495433d6423SLionel Sambuc
1496433d6423SLionel Sambuc if (done) {
1497433d6423SLionel Sambuc privp->s_asyntab = -1;
1498433d6423SLionel Sambuc privp->s_asynsize = 0;
1499433d6423SLionel Sambuc } else {
1500433d6423SLionel Sambuc set_sys_bit(priv(dst_ptr)->s_asyn_pending, privp->s_id);
1501433d6423SLionel Sambuc }
1502433d6423SLionel Sambuc
1503433d6423SLionel Sambuc asyn_error:
1504433d6423SLionel Sambuc return(r);
1505433d6423SLionel Sambuc }
1506433d6423SLionel Sambuc
1507433d6423SLionel Sambuc /*===========================================================================*
1508433d6423SLionel Sambuc * cancel_async *
1509433d6423SLionel Sambuc *===========================================================================*/
1510433d6423SLionel Sambuc int cancel_async(struct proc *src_ptr, struct proc *dst_ptr)
1511433d6423SLionel Sambuc {
1512433d6423SLionel Sambuc /* Cancel asynchronous messages from src to dst, because dst is not interested
1513433d6423SLionel Sambuc * in them (e.g., dst has been restarted) */
1514433d6423SLionel Sambuc int done, do_notify;
1515433d6423SLionel Sambuc unsigned int flags, i;
1516433d6423SLionel Sambuc size_t size;
1517433d6423SLionel Sambuc endpoint_t dst;
1518433d6423SLionel Sambuc struct proc *caller_ptr;
1519433d6423SLionel Sambuc struct priv *privp;
1520433d6423SLionel Sambuc asynmsg_t tabent;
1521433d6423SLionel Sambuc vir_bytes table_v;
1522433d6423SLionel Sambuc
1523433d6423SLionel Sambuc privp = priv(src_ptr);
1524433d6423SLionel Sambuc if (!(privp->s_flags & SYS_PROC)) return(EPERM);
1525433d6423SLionel Sambuc size = privp->s_asynsize;
1526433d6423SLionel Sambuc table_v = privp->s_asyntab;
1527433d6423SLionel Sambuc
1528433d6423SLionel Sambuc /* Clear table pending message flag. We're done unless we're not. */
1529433d6423SLionel Sambuc privp->s_asyntab = -1;
1530433d6423SLionel Sambuc privp->s_asynsize = 0;
1531433d6423SLionel Sambuc unset_sys_bit(priv(dst_ptr)->s_asyn_pending, privp->s_id);
1532433d6423SLionel Sambuc
1533433d6423SLionel Sambuc if (size == 0) return(EAGAIN);
1534433d6423SLionel Sambuc if (!may_send_to(src_ptr, proc_nr(dst_ptr))) return(ECALLDENIED);
1535433d6423SLionel Sambuc
1536433d6423SLionel Sambuc caller_ptr = src_ptr; /* Needed for A_ macros later on */
1537433d6423SLionel Sambuc
1538433d6423SLionel Sambuc /* Scan the table */
1539433d6423SLionel Sambuc do_notify = FALSE;
1540433d6423SLionel Sambuc done = TRUE;
1541433d6423SLionel Sambuc
1542433d6423SLionel Sambuc
1543433d6423SLionel Sambuc for (i = 0; i < size; i++) {
1544433d6423SLionel Sambuc /* Process each entry in the table and store the result in the table.
1545433d6423SLionel Sambuc * If we're done handling a message, copy the result to the sender.
1546433d6423SLionel Sambuc * Some checks done in mini_senda are duplicated here, as the sender
1547433d6423SLionel Sambuc 	 * could've altered the contents of the table in the meantime.
1548433d6423SLionel Sambuc */
1549433d6423SLionel Sambuc
1550433d6423SLionel Sambuc int r = EDEADSRCDST; /* Cancel delivery due to dead dst */
1551433d6423SLionel Sambuc
1552433d6423SLionel Sambuc /* Copy message to kernel */
1553433d6423SLionel Sambuc A_RETR(i);
1554433d6423SLionel Sambuc flags = tabent.flags;
1555433d6423SLionel Sambuc dst = tabent.dst;
1556433d6423SLionel Sambuc
1557433d6423SLionel Sambuc if (flags == 0) continue; /* Skip empty entries */
1558433d6423SLionel Sambuc
1559433d6423SLionel Sambuc /* 'flags' field must contain only valid bits */
1560433d6423SLionel Sambuc if(flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY|AMF_NOTIFY_ERR))
1561433d6423SLionel Sambuc r = EINVAL;
1562433d6423SLionel Sambuc else if (!(flags & AMF_VALID)) /* Must contain message */
1563433d6423SLionel Sambuc r = EINVAL;
1564433d6423SLionel Sambuc else if (flags & AMF_DONE) continue; /* Already done processing */
1565433d6423SLionel Sambuc
1566433d6423SLionel Sambuc /* Message must be directed at receiving end */
1567433d6423SLionel Sambuc if (dst != dst_ptr->p_endpoint) {
1568433d6423SLionel Sambuc done = FALSE;
1569433d6423SLionel Sambuc continue;
1570433d6423SLionel Sambuc }
1571433d6423SLionel Sambuc
1572433d6423SLionel Sambuc /* Store results for sender */
1573433d6423SLionel Sambuc tabent.result = r;
1574433d6423SLionel Sambuc tabent.flags = flags | AMF_DONE;
1575433d6423SLionel Sambuc if (flags & AMF_NOTIFY) do_notify = TRUE;
1576433d6423SLionel Sambuc else if (r != OK && (flags & AMF_NOTIFY_ERR)) do_notify = TRUE;
15773091b8cfSDavid van Moolenbroek A_INSRT(i); /* Copy results to sender; ignore errors */
1578433d6423SLionel Sambuc }
1579433d6423SLionel Sambuc
1580433d6423SLionel Sambuc if (do_notify)
1581433d6423SLionel Sambuc mini_notify(proc_addr(ASYNCM), src_ptr->p_endpoint);
1582433d6423SLionel Sambuc
1583433d6423SLionel Sambuc if (!done) {
1584433d6423SLionel Sambuc privp->s_asyntab = table_v;
1585433d6423SLionel Sambuc privp->s_asynsize = size;
1586433d6423SLionel Sambuc }
1587433d6423SLionel Sambuc
1588433d6423SLionel Sambuc asyn_error:
1589433d6423SLionel Sambuc return(OK);
1590433d6423SLionel Sambuc }
1591433d6423SLionel Sambuc
1592433d6423SLionel Sambuc /*===========================================================================*
1593433d6423SLionel Sambuc * enqueue *
1594433d6423SLionel Sambuc *===========================================================================*/
1595433d6423SLionel Sambuc void enqueue(
1596433d6423SLionel Sambuc register struct proc *rp /* this process is now runnable */
1597433d6423SLionel Sambuc )
1598433d6423SLionel Sambuc {
1599433d6423SLionel Sambuc /* Add 'rp' to one of the queues of runnable processes. This function is
1600433d6423SLionel Sambuc * responsible for inserting a process into one of the scheduling queues.
1601433d6423SLionel Sambuc * The mechanism is implemented here. The actual scheduling policy is
1602433d6423SLionel Sambuc * defined in sched() and pick_proc().
1603433d6423SLionel Sambuc *
1604433d6423SLionel Sambuc * This function can be used x-cpu as it always uses the queues of the cpu the
1605433d6423SLionel Sambuc * process is assigned to.
1606433d6423SLionel Sambuc */
1607433d6423SLionel Sambuc int q = rp->p_priority; /* scheduling queue to use */
1608433d6423SLionel Sambuc struct proc **rdy_head, **rdy_tail;
1609433d6423SLionel Sambuc
1610433d6423SLionel Sambuc assert(proc_is_runnable(rp));
1611433d6423SLionel Sambuc
1612433d6423SLionel Sambuc assert(q >= 0);
1613433d6423SLionel Sambuc
1614433d6423SLionel Sambuc rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
1615433d6423SLionel Sambuc rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
1616433d6423SLionel Sambuc
1617433d6423SLionel Sambuc /* Now add the process to the queue. */
1618433d6423SLionel Sambuc if (!rdy_head[q]) { /* add to empty queue */
1619433d6423SLionel Sambuc rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
1620433d6423SLionel Sambuc rp->p_nextready = NULL; /* mark new end */
1621433d6423SLionel Sambuc }
1622433d6423SLionel Sambuc else { /* add to tail of queue */
1623433d6423SLionel Sambuc rdy_tail[q]->p_nextready = rp; /* chain tail of queue */
1624433d6423SLionel Sambuc rdy_tail[q] = rp; /* set new queue tail */
1625433d6423SLionel Sambuc rp->p_nextready = NULL; /* mark new end */
1626433d6423SLionel Sambuc }
1627433d6423SLionel Sambuc
1628433d6423SLionel Sambuc if (cpuid == rp->p_cpu) {
1629433d6423SLionel Sambuc /*
1630433d6423SLionel Sambuc 	 * when enqueueing a process with a higher priority than the current one,
1631433d6423SLionel Sambuc 	 * the current one gets preempted, provided it is preemptible. Testing
1632433d6423SLionel Sambuc 	 * the priority also makes sure that a process does not preempt itself
1633433d6423SLionel Sambuc */
1634433d6423SLionel Sambuc struct proc * p;
1635433d6423SLionel Sambuc p = get_cpulocal_var(proc_ptr);
1636433d6423SLionel Sambuc assert(p);
1637433d6423SLionel Sambuc if((p->p_priority > rp->p_priority) &&
1638433d6423SLionel Sambuc (priv(p)->s_flags & PREEMPTIBLE))
1639433d6423SLionel Sambuc RTS_SET(p, RTS_PREEMPTED); /* calls dequeue() */
1640433d6423SLionel Sambuc }
1641433d6423SLionel Sambuc #ifdef CONFIG_SMP
1642433d6423SLionel Sambuc /*
1643433d6423SLionel Sambuc  * if the process was enqueued on a different cpu and that cpu is idle, i.e.
1644433d6423SLionel Sambuc  * its timer is off, we need to wake up that cpu and let it schedule this new
1645433d6423SLionel Sambuc  * process
1646433d6423SLionel Sambuc */
1647433d6423SLionel Sambuc else if (get_cpu_var(rp->p_cpu, cpu_is_idle)) {
1648433d6423SLionel Sambuc smp_schedule(rp->p_cpu);
1649433d6423SLionel Sambuc }
1650433d6423SLionel Sambuc #endif
1651433d6423SLionel Sambuc
1652433d6423SLionel Sambuc /* Make note of when this process was added to queue */
1653433d6423SLionel Sambuc read_tsc_64(&(get_cpulocal_var(proc_ptr)->p_accounting.enter_queue));
1654433d6423SLionel Sambuc
1655433d6423SLionel Sambuc
1656433d6423SLionel Sambuc #if DEBUG_SANITYCHECKS
1657433d6423SLionel Sambuc assert(runqueues_ok_local());
1658433d6423SLionel Sambuc #endif
1659433d6423SLionel Sambuc }
1660433d6423SLionel Sambuc
1661433d6423SLionel Sambuc /*===========================================================================*
1662433d6423SLionel Sambuc * enqueue_head *
1663433d6423SLionel Sambuc *===========================================================================*/
1664433d6423SLionel Sambuc /*
1665433d6423SLionel Sambuc  * Put a process at the front of its run queue. This comes in handy when a
1666433d6423SLionel Sambuc  * process is preempted and removed from its run queue, so that a currently
1667433d6423SLionel Sambuc  * not-runnable process does not stay on a run queue. We have to put this
1668433d6423SLionel Sambuc  * process back at the front to be fair
1669433d6423SLionel Sambuc */
1670433d6423SLionel Sambuc static void enqueue_head(struct proc *rp)
1671433d6423SLionel Sambuc {
1672433d6423SLionel Sambuc const int q = rp->p_priority; /* scheduling queue to use */
1673433d6423SLionel Sambuc
1674433d6423SLionel Sambuc struct proc **rdy_head, **rdy_tail;
1675433d6423SLionel Sambuc
1676433d6423SLionel Sambuc assert(proc_ptr_ok(rp));
1677433d6423SLionel Sambuc assert(proc_is_runnable(rp));
1678433d6423SLionel Sambuc
1679433d6423SLionel Sambuc /*
1680433d6423SLionel Sambuc  * the process was runnable with quantum left when it was dequeued. A
1681433d6423SLionel Sambuc  * process with no time left should have been handled elsewhere and differently
1682433d6423SLionel Sambuc */
1683433d6423SLionel Sambuc assert(rp->p_cpu_time_left);
1684433d6423SLionel Sambuc
1685433d6423SLionel Sambuc assert(q >= 0);
1686433d6423SLionel Sambuc
1687433d6423SLionel Sambuc
1688433d6423SLionel Sambuc rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
1689433d6423SLionel Sambuc rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
1690433d6423SLionel Sambuc
1691433d6423SLionel Sambuc /* Now add the process to the queue. */
1692433d6423SLionel Sambuc if (!rdy_head[q]) { /* add to empty queue */
1693433d6423SLionel Sambuc rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
1694433d6423SLionel Sambuc rp->p_nextready = NULL; /* mark new end */
1695433d6423SLionel Sambuc } else { /* add to head of queue */
1696433d6423SLionel Sambuc rp->p_nextready = rdy_head[q]; /* chain head of queue */
1697433d6423SLionel Sambuc rdy_head[q] = rp; /* set new queue head */
1698433d6423SLionel Sambuc }
1699433d6423SLionel Sambuc
1700433d6423SLionel Sambuc /* Make note of when this process was added to queue */
1701433d6423SLionel Sambuc read_tsc_64(&(get_cpulocal_var(proc_ptr->p_accounting.enter_queue)));
1702433d6423SLionel Sambuc
1703433d6423SLionel Sambuc
1704433d6423SLionel Sambuc /* Process accounting for scheduling */
1705433d6423SLionel Sambuc rp->p_accounting.dequeues--;
1706433d6423SLionel Sambuc rp->p_accounting.preempted++;
1707433d6423SLionel Sambuc
1708433d6423SLionel Sambuc #if DEBUG_SANITYCHECKS
1709433d6423SLionel Sambuc assert(runqueues_ok_local());
1710433d6423SLionel Sambuc #endif
1711433d6423SLionel Sambuc }
1712433d6423SLionel Sambuc
1713433d6423SLionel Sambuc /*===========================================================================*
1714433d6423SLionel Sambuc * dequeue *
1715433d6423SLionel Sambuc *===========================================================================*/
1716433d6423SLionel Sambuc void dequeue(struct proc *rp)
1717433d6423SLionel Sambuc /* this process is no longer runnable */
1718433d6423SLionel Sambuc {
1719433d6423SLionel Sambuc /* A process must be removed from the scheduling queues, for example, because
1720433d6423SLionel Sambuc * it has blocked. If the currently active process is removed, a new process
1721433d6423SLionel Sambuc * is picked to run by calling pick_proc().
1722433d6423SLionel Sambuc *
1723433d6423SLionel Sambuc * This function can operate x-cpu as it always removes the process from the
1724433d6423SLionel Sambuc * queue of the cpu the process is currently assigned to.
1725433d6423SLionel Sambuc */
1726433d6423SLionel Sambuc int q = rp->p_priority; /* queue to use */
1727433d6423SLionel Sambuc struct proc **xpp; /* iterate over queue */
1728433d6423SLionel Sambuc struct proc *prev_xp;
1729433d6423SLionel Sambuc u64_t tsc, tsc_delta;
1730433d6423SLionel Sambuc
1731433d6423SLionel Sambuc struct proc **rdy_tail;
1732433d6423SLionel Sambuc
1733433d6423SLionel Sambuc assert(proc_ptr_ok(rp));
1734433d6423SLionel Sambuc assert(!proc_is_runnable(rp));
1735433d6423SLionel Sambuc
1736433d6423SLionel Sambuc /* Side-effect for kernel: check if the task's stack still is ok? */
1737433d6423SLionel Sambuc assert (!iskernelp(rp) || *priv(rp)->s_stack_guard == STACK_GUARD);
1738433d6423SLionel Sambuc
1739433d6423SLionel Sambuc rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
1740433d6423SLionel Sambuc
1741433d6423SLionel Sambuc /* Now make sure that the process is not in its ready queue. Remove the
1742433d6423SLionel Sambuc * process if it is found. A process can be made unready even if it is not
1743433d6423SLionel Sambuc * running by being sent a signal that kills it.
1744433d6423SLionel Sambuc */
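  /* The prev_xp bookkeeping below exists only so that, when the node being
   * removed turns out to be the queue tail, rdy_tail[q] can be pointed at its
   * predecessor; the pointer-pointer walk by itself cannot recover that.
   */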
1745433d6423SLionel Sambuc prev_xp = NULL;
1746433d6423SLionel Sambuc for (xpp = get_cpu_var_ptr(rp->p_cpu, run_q_head[q]); *xpp;
1747433d6423SLionel Sambuc xpp = &(*xpp)->p_nextready) {
1748433d6423SLionel Sambuc if (*xpp == rp) { /* found process to remove */
1749433d6423SLionel Sambuc *xpp = (*xpp)->p_nextready; /* replace with next chain */
1750433d6423SLionel Sambuc if (rp == rdy_tail[q]) { /* queue tail removed */
1751433d6423SLionel Sambuc rdy_tail[q] = prev_xp; /* set new tail */
1752433d6423SLionel Sambuc }
1753433d6423SLionel Sambuc
1754433d6423SLionel Sambuc break;
1755433d6423SLionel Sambuc }
1756433d6423SLionel Sambuc prev_xp = *xpp; /* save previous in chain */
1757433d6423SLionel Sambuc }
1758433d6423SLionel Sambuc
1759433d6423SLionel Sambuc
1760433d6423SLionel Sambuc /* Process accounting for scheduling */
1761433d6423SLionel Sambuc rp->p_accounting.dequeues++;
1762433d6423SLionel Sambuc
1763433d6423SLionel Sambuc /* this is not all that accurate on virtual machines, especially with
1764433d6423SLionel Sambuc IO bound processes that only spend a short amount of time in the queue
1765433d6423SLionel Sambuc at a time. */
1766433d6423SLionel Sambuc if (rp->p_accounting.enter_queue) {
1767433d6423SLionel Sambuc read_tsc_64(&tsc);
1768433d6423SLionel Sambuc tsc_delta = tsc - rp->p_accounting.enter_queue;
1769433d6423SLionel Sambuc rp->p_accounting.time_in_queue = rp->p_accounting.time_in_queue +
1770433d6423SLionel Sambuc tsc_delta;
1771433d6423SLionel Sambuc rp->p_accounting.enter_queue = 0;
1772433d6423SLionel Sambuc }
1773433d6423SLionel Sambuc
17746b0f33d0SDavid van Moolenbroek /* For ps(1), remember when the process was last dequeued. */
17756b0f33d0SDavid van Moolenbroek rp->p_dequeued = get_monotonic();
1776433d6423SLionel Sambuc
1777433d6423SLionel Sambuc #if DEBUG_SANITYCHECKS
1778433d6423SLionel Sambuc assert(runqueues_ok_local());
1779433d6423SLionel Sambuc #endif
1780433d6423SLionel Sambuc }
1781433d6423SLionel Sambuc
1782433d6423SLionel Sambuc /*===========================================================================*
1783433d6423SLionel Sambuc * pick_proc *
1784433d6423SLionel Sambuc *===========================================================================*/
1785433d6423SLionel Sambuc static struct proc * pick_proc(void)
1786433d6423SLionel Sambuc {
1787*8e2f9ecaSTai Groot /* Decide who to run now. A new process is selected and returned.
1788433d6423SLionel Sambuc * When a billable process is selected, record it in 'bill_ptr', so that the
1789433d6423SLionel Sambuc * clock task can tell who to bill for system time.
1790433d6423SLionel Sambuc *
1791433d6423SLionel Sambuc * This function always uses the run queues of the local cpu!
1792433d6423SLionel Sambuc */
1793433d6423SLionel Sambuc register struct proc *rp; /* process to run */
1794433d6423SLionel Sambuc struct proc **rdy_head;
1795433d6423SLionel Sambuc int q; /* iterate over queues */
1796433d6423SLionel Sambuc
1797433d6423SLionel Sambuc /* Check each of the scheduling queues for ready processes. The number of
1798433d6423SLionel Sambuc * queues is defined in proc.h, and priorities are set in the task table.
1799433d6423SLionel Sambuc * If there are no processes ready to run, return NULL.
1800433d6423SLionel Sambuc */
1801433d6423SLionel Sambuc rdy_head = get_cpulocal_var(run_q_head);
1802433d6423SLionel Sambuc for (q=0; q < NR_SCHED_QUEUES; q++) {
1803433d6423SLionel Sambuc if(!(rp = rdy_head[q])) {
1804433d6423SLionel Sambuc TRACE(VF_PICKPROC, printf("cpu %d queue %d empty\n", cpuid, q););
1805433d6423SLionel Sambuc continue;
1806433d6423SLionel Sambuc }
1807433d6423SLionel Sambuc assert(proc_is_runnable(rp));
1808433d6423SLionel Sambuc if (priv(rp)->s_flags & BILLABLE)
1809433d6423SLionel Sambuc get_cpulocal_var(bill_ptr) = rp; /* bill for system time */
1810433d6423SLionel Sambuc return rp;
1811433d6423SLionel Sambuc }
1812433d6423SLionel Sambuc return NULL;
1813433d6423SLionel Sambuc }
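/*
 * Usage note, an assumption about the callers rather than something defined
 * here: a NULL return simply means that no process is runnable on this cpu,
 * and the caller is expected to fall back to idling instead of treating it
 * as an error.
 */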
1814433d6423SLionel Sambuc
1815433d6423SLionel Sambuc /*===========================================================================*
1816433d6423SLionel Sambuc * endpoint_lookup *
1817433d6423SLionel Sambuc *===========================================================================*/
1818433d6423SLionel Sambuc struct proc *endpoint_lookup(endpoint_t e)
1819433d6423SLionel Sambuc {
1820433d6423SLionel Sambuc int n;
1821433d6423SLionel Sambuc
1822433d6423SLionel Sambuc if(!isokendpt(e, &n)) return NULL;
1823433d6423SLionel Sambuc
1824433d6423SLionel Sambuc return proc_addr(n);
1825433d6423SLionel Sambuc }
1826433d6423SLionel Sambuc
1827433d6423SLionel Sambuc /*===========================================================================*
1828433d6423SLionel Sambuc * isokendpt_f *
1829433d6423SLionel Sambuc *===========================================================================*/
1830433d6423SLionel Sambuc #if DEBUG_ENABLE_IPC_WARNINGS
18316077d1adSDr. Florian Grätz int isokendpt_f(const char * file, int line, endpoint_t e, int * p,
18326077d1adSDr. Florian Grätz const int fatalflag)
1833433d6423SLionel Sambuc #else
18346077d1adSDr. Florian Grätz int isokendpt_f(endpoint_t e, int * p, const int fatalflag)
1835433d6423SLionel Sambuc #endif
1836433d6423SLionel Sambuc {
1837433d6423SLionel Sambuc int ok = 0;
1838433d6423SLionel Sambuc /* Convert an endpoint number into a process number.
1839433d6423SLionel Sambuc * Return nonzero if the process is alive with the corresponding
1840433d6423SLionel Sambuc * generation number, zero otherwise.
1841433d6423SLionel Sambuc *
1842433d6423SLionel Sambuc * This function is called with file and line number by the
1843433d6423SLionel Sambuc * isokendpt_d macro if DEBUG_ENABLE_IPC_WARNINGS is defined,
1844433d6423SLionel Sambuc  * otherwise without. This allows us to print where the
1845433d6423SLionel Sambuc * conversion was attempted, making the errors verbose without
1846433d6423SLionel Sambuc * adding code for that at every call.
1847433d6423SLionel Sambuc *
1848433d6423SLionel Sambuc * If fatalflag is nonzero, we must panic if the conversion doesn't
1849433d6423SLionel Sambuc * succeed.
1850433d6423SLionel Sambuc */
1851433d6423SLionel Sambuc *p = _ENDPOINT_P(e);
1852433d6423SLionel Sambuc ok = 0;
1853433d6423SLionel Sambuc if(isokprocn(*p) && !isemptyn(*p) && proc_addr(*p)->p_endpoint == e)
1854433d6423SLionel Sambuc ok = 1;
1855433d6423SLionel Sambuc if(!ok && fatalflag)
1856433d6423SLionel Sambuc panic("invalid endpoint: %d", e);
1857433d6423SLionel Sambuc return ok;
1858433d6423SLionel Sambuc }
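/*
 * Illustrative note: an endpoint encodes both a process slot and a
 * generation count. A stale endpoint that refers to an earlier occupant of
 * the same slot still yields the same *p through _ENDPOINT_P(e), but fails
 * the proc_addr(*p)->p_endpoint == e comparison above and is rejected, so
 * messages aimed at an exited and since-replaced process are caught here.
 */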
1859433d6423SLionel Sambuc
1860433d6423SLionel Sambuc static void notify_scheduler(struct proc *p)
1861433d6423SLionel Sambuc {
1862433d6423SLionel Sambuc message m_no_quantum;
1863433d6423SLionel Sambuc int err;
1864433d6423SLionel Sambuc
1865433d6423SLionel Sambuc assert(!proc_kernel_scheduler(p));
1866433d6423SLionel Sambuc
1867433d6423SLionel Sambuc /* dequeue the process */
1868433d6423SLionel Sambuc RTS_SET(p, RTS_NO_QUANTUM);
1869433d6423SLionel Sambuc /*
1870433d6423SLionel Sambuc * Notify the process's scheduler that it has run out of
1871433d6423SLionel Sambuc * quantum. This is done by sending a message to the scheduler
1872433d6423SLionel Sambuc * on the process's behalf
1873433d6423SLionel Sambuc */
1874433d6423SLionel Sambuc m_no_quantum.m_source = p->p_endpoint;
1875433d6423SLionel Sambuc m_no_quantum.m_type = SCHEDULING_NO_QUANTUM;
1876433d6423SLionel Sambuc m_no_quantum.m_krn_lsys_schedule.acnt_queue = cpu_time_2_ms(p->p_accounting.time_in_queue);
1877433d6423SLionel Sambuc m_no_quantum.m_krn_lsys_schedule.acnt_deqs = p->p_accounting.dequeues;
1878433d6423SLionel Sambuc m_no_quantum.m_krn_lsys_schedule.acnt_ipc_sync = p->p_accounting.ipc_sync;
1879433d6423SLionel Sambuc m_no_quantum.m_krn_lsys_schedule.acnt_ipc_async = p->p_accounting.ipc_async;
1880433d6423SLionel Sambuc m_no_quantum.m_krn_lsys_schedule.acnt_preempt = p->p_accounting.preempted;
1881433d6423SLionel Sambuc m_no_quantum.m_krn_lsys_schedule.acnt_cpu = cpuid;
1882433d6423SLionel Sambuc m_no_quantum.m_krn_lsys_schedule.acnt_cpu_load = cpu_load();
1883433d6423SLionel Sambuc
1884433d6423SLionel Sambuc /* Reset accounting */
1885433d6423SLionel Sambuc reset_proc_accounting(p);
1886433d6423SLionel Sambuc
1887433d6423SLionel Sambuc if ((err = mini_send(p, p->p_scheduler->p_endpoint,
1888433d6423SLionel Sambuc &m_no_quantum, FROM_KERNEL))) {
1889433d6423SLionel Sambuc panic("WARNING: Scheduling: mini_send returned %d\n", err);
1890433d6423SLionel Sambuc }
1891433d6423SLionel Sambuc }
1892433d6423SLionel Sambuc
1893433d6423SLionel Sambuc void proc_no_time(struct proc * p)
1894433d6423SLionel Sambuc {
1895433d6423SLionel Sambuc if (!proc_kernel_scheduler(p) && priv(p)->s_flags & PREEMPTIBLE) {
1896433d6423SLionel Sambuc /* this dequeues the process */
1897433d6423SLionel Sambuc notify_scheduler(p);
1898433d6423SLionel Sambuc }
1899433d6423SLionel Sambuc else {
1900433d6423SLionel Sambuc /*
1901433d6423SLionel Sambuc * non-preemptible processes only need their quantum to
1902433d6423SLionel Sambuc 		 * be renewed. In fact, they bypass scheduling
1903433d6423SLionel Sambuc */
1904433d6423SLionel Sambuc p->p_cpu_time_left = ms_2_cpu_time(p->p_quantum_size_ms);
1905433d6423SLionel Sambuc #if DEBUG_RACE
1906433d6423SLionel Sambuc RTS_SET(p, RTS_PREEMPTED);
1907433d6423SLionel Sambuc RTS_UNSET(p, RTS_PREEMPTED);
1908433d6423SLionel Sambuc #endif
1909433d6423SLionel Sambuc }
1910433d6423SLionel Sambuc }
1911433d6423SLionel Sambuc
1912433d6423SLionel Sambuc void reset_proc_accounting(struct proc *p)
1913433d6423SLionel Sambuc {
1914433d6423SLionel Sambuc p->p_accounting.preempted = 0;
1915433d6423SLionel Sambuc p->p_accounting.ipc_sync = 0;
1916433d6423SLionel Sambuc p->p_accounting.ipc_async = 0;
1917433d6423SLionel Sambuc p->p_accounting.dequeues = 0;
1918433d6423SLionel Sambuc p->p_accounting.time_in_queue = 0;
1919433d6423SLionel Sambuc p->p_accounting.enter_queue = 0;
1920433d6423SLionel Sambuc }
1921433d6423SLionel Sambuc
1922433d6423SLionel Sambuc void copr_not_available_handler(void)
1923433d6423SLionel Sambuc {
1924433d6423SLionel Sambuc struct proc * p;
1925433d6423SLionel Sambuc struct proc ** local_fpu_owner;
1926433d6423SLionel Sambuc /*
1927433d6423SLionel Sambuc * Disable the FPU exception (both for the kernel and for the process
1928433d6423SLionel Sambuc * once it's scheduled), and initialize or restore the FPU state.
1929433d6423SLionel Sambuc */
1930433d6423SLionel Sambuc
1931433d6423SLionel Sambuc disable_fpu_exception();
1932433d6423SLionel Sambuc
1933433d6423SLionel Sambuc p = get_cpulocal_var(proc_ptr);
1934433d6423SLionel Sambuc
1935433d6423SLionel Sambuc /* if FPU is not owned by anyone, do not store anything */
1936433d6423SLionel Sambuc local_fpu_owner = get_cpulocal_var_ptr(fpu_owner);
1937433d6423SLionel Sambuc if (*local_fpu_owner != NULL) {
1938433d6423SLionel Sambuc assert(*local_fpu_owner != p);
1939433d6423SLionel Sambuc save_local_fpu(*local_fpu_owner, FALSE /*retain*/);
1940433d6423SLionel Sambuc }
1941433d6423SLionel Sambuc
1942433d6423SLionel Sambuc /*
1943433d6423SLionel Sambuc * restore the current process' state and let it run again, do not
1944433d6423SLionel Sambuc * schedule!
1945433d6423SLionel Sambuc */
1946433d6423SLionel Sambuc if (restore_fpu(p) != OK) {
1947433d6423SLionel Sambuc /* Restoring FPU state failed. This is always the process's own
1948433d6423SLionel Sambuc * fault. Send a signal, and schedule another process instead.
1949433d6423SLionel Sambuc */
1950433d6423SLionel Sambuc *local_fpu_owner = NULL; /* release FPU */
1951433d6423SLionel Sambuc cause_sig(proc_nr(p), SIGFPE);
1952433d6423SLionel Sambuc return;
1953433d6423SLionel Sambuc }
1954433d6423SLionel Sambuc
1955433d6423SLionel Sambuc *local_fpu_owner = p;
1956433d6423SLionel Sambuc context_stop(proc_addr(KERNEL));
1957433d6423SLionel Sambuc restore_user_context(p);
1958433d6423SLionel Sambuc NOT_REACHABLE;
1959433d6423SLionel Sambuc }
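/*
 * Descriptive note: this handler implements lazy FPU switching. The FPU
 * state of the previous owner is saved only when another process actually
 * touches the FPU and traps here, so processes that never use the FPU never
 * pay for saving or restoring it.
 */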
1960433d6423SLionel Sambuc
1961433d6423SLionel Sambuc void release_fpu(struct proc * p) {
1962433d6423SLionel Sambuc struct proc ** fpu_owner_ptr;
1963433d6423SLionel Sambuc
1964433d6423SLionel Sambuc fpu_owner_ptr = get_cpu_var_ptr(p->p_cpu, fpu_owner);
1965433d6423SLionel Sambuc
1966433d6423SLionel Sambuc if (*fpu_owner_ptr == p)
1967433d6423SLionel Sambuc *fpu_owner_ptr = NULL;
1968433d6423SLionel Sambuc }
1969433d6423SLionel Sambuc
19706077d1adSDr. Florian Grätz void ser_dump_proc(void)
1971433d6423SLionel Sambuc {
1972433d6423SLionel Sambuc struct proc *pp;
1973433d6423SLionel Sambuc
1974433d6423SLionel Sambuc for (pp= BEG_PROC_ADDR; pp < END_PROC_ADDR; pp++)
1975433d6423SLionel Sambuc {
1976433d6423SLionel Sambuc if (isemptyp(pp))
1977433d6423SLionel Sambuc continue;
1978433d6423SLionel Sambuc print_proc_recursive(pp);
1979433d6423SLionel Sambuc }
1980433d6423SLionel Sambuc }
1981