/*
 * This file implements a generic process event publish/subscribe facility.
 * The facility is for use by non-core system services that implement part of
 * the userland system call interface.  Currently, it supports two events: a
 * process catching a signal, and a process being terminated.  A subscribing
 * service would typically use such events to interrupt a blocking system call
 * and/or clean up process-bound resources.  As of writing, the only service
 * that uses this facility is the System V IPC server.
 *
 * Each of these events will be published to subscribing services right after
 * VFS has acknowledged that it has processed the same event.  For each
 * subscriber, in turn, the process will be blocked (with the EVENT_CALL flag
 * set) until the subscriber acknowledges the event or PM learns that the
 * subscriber has died.  Thus, each subscriber adds a serialized messaging
 * roundtrip for each subscribed event.
 *
 * The one and only reason for this synchronous, serialized approach is that it
 * avoids PM queuing up too many asynchronous messages.  In theory, each
 * running process may have an event pending, and thus, the serial synchronous
 * approach requires NR_PROCS asynsend slots.  For a parallel synchronous
 * approach, this would increase to (NR_PROCS*NR_SUBS).  Worse yet, for an
 * asynchronous event notification approach, the number of messages that PM can
 * end up queuing is potentially unbounded, so that is certainly not an option.
 * At this moment, we expect only one subscriber (the IPC server), which makes
 * the serial vs parallel point less relevant.
 *
 * It is not possible to subscribe to events from certain processes only.  If
 * a service were to subscribe to process events as part of a system call by
 * a process (e.g., semop(2) in the case of the IPC server), it may subscribe
 * "too late" and already have missed a signal event for the process calling
 * semop(2), for example.  Resolving such race conditions would require major
 * infrastructure changes.
 *
 * A service may however change its event subscription mask at runtime, so as
 * to limit the number of event messages it receives in a crude fashion.  For
 * the same race-condition reasons, new subscriptions must always be made when
 * processing a message that is *not* a system call potentially affected by
 * events.  In the case of the IPC server, it may subscribe to events from
 * semget(2) but not semop(2).  For signal events, the delay call system
 * guarantees the safety of this approach; for exit events, the message type
 * prioritization does (which is not great; see the TODO item in forkexit.c).
 *
 * After changing its mask, a subscribing service may still receive messages
 * for events it is no longer subscribed to.  It should acknowledge these
 * messages by sending a reply as usual.
 */
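
/*
 * For illustration only, a rough subscriber-side sketch of the intended use.
 * The libsys wrapper name (proceventmask()), the acknowledgment message type
 * (PROC_EVENT_REPLY), and the cleanup helper are assumptions here, loosely
 * modeled on the IPC server; this file defines only the PM side.
 *
 *	// at service initialization: subscribe to both event types
 *	if ((r = proceventmask(PROC_EVENT_EXIT | PROC_EVENT_SIGNAL)) != OK)
 *		panic("unable to subscribe to process events (%d)", r);
 *
 *	// in the main message loop: handle and acknowledge each event
 *	case PROC_EVENT:
 *		handle_proc_event(&m);		// hypothetical cleanup helper
 *		m.m_type = PROC_EVENT_REPLY;	// echo endpt and event fields
 *		if (asynsend3(m.m_source, &m, AMF_NOREPLY) != OK)
 *			printf("service: unable to reply to PM\n");
 *		break;
 */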

#include "pm.h"
#include "mproc.h"
#include <assert.h>

/*
 * A realistic upper bound for the number of subscribing services.  The process
 * event notification system adds a round trip to a service for each subscriber
 * and uses asynchronous messaging to boot, so clearly it does not scale to
 * numbers larger than this.
 */
#define NR_SUBS		4

static struct {
	endpoint_t endpt;		/* endpoint of subscriber */
	unsigned int mask;		/* interests bit mask (PROC_EVENT_) */
	unsigned int waiting;		/* # procs blocked on reply from it */
} subs[NR_SUBS];

static unsigned int nsubs = 0;
static unsigned int nested = 0;

/*
 * For the current event of the given process, as determined by its flags, send
 * a process event message to the next subscriber, or resume handling the
 * event itself if there are no more subscribers to notify.
 */
static void
resume_event(struct mproc * rmp)
{
	message m;
	unsigned int i, event;
	int r;

	assert(rmp->mp_flags & IN_USE);
	assert(rmp->mp_flags & EVENT_CALL);
	assert(rmp->mp_eventsub != NO_EVENTSUB);

	/* Which event should we be concerned about? */
	if (rmp->mp_flags & EXITING)
		event = PROC_EVENT_EXIT;
	else if (rmp->mp_flags & UNPAUSED)
		event = PROC_EVENT_SIGNAL;
	else
		panic("unknown event for flags %x", rmp->mp_flags);

	/*
	 * If there are additional services interested in this event, send a
	 * message to the next one.
	 */
	for (i = rmp->mp_eventsub; i < nsubs; i++, rmp->mp_eventsub++) {
		if (subs[i].mask & event) {
			memset(&m, 0, sizeof(m));
			m.m_type = PROC_EVENT;
			m.m_pm_lsys_proc_event.endpt = rmp->mp_endpoint;
			m.m_pm_lsys_proc_event.event = event;

			r = asynsend3(subs[i].endpt, &m, AMF_NOREPLY);
			if (r != OK)
				panic("asynsend failed: %d", r);

			assert(subs[i].waiting < NR_PROCS);
			subs[i].waiting++;

			return;
		}
	}

	/* No more subscribers to be notified, resume the actual event. */
	rmp->mp_flags &= ~EVENT_CALL;
	rmp->mp_eventsub = NO_EVENTSUB;

	if (event == PROC_EVENT_EXIT)
		exit_restart(rmp);
	else if (event == PROC_EVENT_SIGNAL)
		restart_sigs(rmp);
}

/*
 * Remove a subscriber from the set, forcefully if we have to.  Ensure that
 * any processes currently subject to process event notification are updated
 * accordingly, in a way that no services are skipped for process events.
 */
static void
remove_sub(unsigned int slot)
{
	struct mproc *rmp;
	unsigned int i;

	/* The loop below needs the remaining items to be kept in order. */
	for (i = slot; i < nsubs - 1; i++)
		subs[i] = subs[i + 1];
	nsubs--;

	/* Adjust affected processes' event subscriber indexes to match. */
	for (rmp = &mproc[0]; rmp < &mproc[NR_PROCS]; rmp++) {
		if ((rmp->mp_flags & (IN_USE | EVENT_CALL)) !=
		    (IN_USE | EVENT_CALL))
			continue;
		assert(rmp->mp_eventsub != NO_EVENTSUB);

		/*
		 * While resuming a process could trigger new events, event
		 * calls always take place after the corresponding VFS calls,
		 * making this nesting-safe.  Check anyway, because if nesting
		 * does occur, we are in serious (un-debuggable) trouble.
		 */
		if ((unsigned int)rmp->mp_eventsub == slot) {
			nested++;
			resume_event(rmp);
			nested--;
		} else if ((unsigned int)rmp->mp_eventsub > slot)
			rmp->mp_eventsub--;
	}
}

/*
 * Subscribe to process events.  The given event mask denotes the events in
 * which the caller is interested.  Multiple calls will each replace the mask,
 * and a mask of zero will unsubscribe the service from events altogether.
 * Return OK on success, EPERM if the caller may not register for events, or
 * ENOMEM if all subscriber slots are in use already.
 */
int
do_proceventmask(void)
{
	unsigned int i, mask;

	/* This call is for system services only. */
	if (!(mp->mp_flags & PRIV_PROC))
		return EPERM;

	mask = m_in.m_lsys_pm_proceventmask.mask;

	/*
	 * First check if we need to update or remove an existing entry.
	 * We cannot actually remove services for which we are still waiting
	 * for a reply, so set their mask to zero for later removal instead.
	 */
	for (i = 0; i < nsubs; i++) {
		if (subs[i].endpt == who_e) {
			if (mask == 0 && subs[i].waiting == 0)
				remove_sub(i);
			else
				subs[i].mask = mask;
			return OK;
		}
	}

	/* Add a new entry, unless the given mask is empty. */
	if (mask == 0)
		return OK;

	/* This case should never trigger. */
	if (nsubs == __arraycount(subs)) {
		printf("PM: too many process event subscribers!\n");
		return ENOMEM;
	}

	subs[nsubs].endpt = who_e;
	subs[nsubs].mask = mask;
	nsubs++;

	return OK;
}
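
/*
 * Caller-side sketch, for illustration only (the libsys wrapper name is an
 * assumption; this file defines only the PM-side handler above).  A service
 * would typically issue the call through a wrapper such as proceventmask(),
 * which sends PM a request carrying the mask in m_lsys_pm_proceventmask.mask:
 *
 *	// subscribe to exit events only, e.g. while handling semget(2)
 *	if ((r = proceventmask(PROC_EVENT_EXIT)) != OK)
 *		printf("service: proceventmask failed (%d)\n", r);
 *
 *	// later, drop the subscription again; leftover event messages may
 *	// still arrive and must still be acknowledged, as described above
 *	(void)proceventmask(0);
 */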

/*
 * A subscribing service has replied to a process event message from us, or at
 * least that is what should have happened.  First make sure of this, and then
 * resume event handling for the affected process.
 */
int
do_proc_event_reply(void)
{
	struct mproc *rmp;
	endpoint_t endpt;
	unsigned int i, event;
	int slot;

	assert(nested == 0);

	/*
	 * Is this an accidental call from a misguided user process?
	 * Politely tell it to go away.
	 */
	if (!(mp->mp_flags & PRIV_PROC))
		return ENOSYS;

	/*
	 * Ensure that we got the reply that we want.  Since this code is
	 * relatively new, produce lots of warnings for cases that should never
	 * or rarely occur.  Later we can just ignore all mismatching replies.
	 */
	endpt = m_in.m_pm_lsys_proc_event.endpt;
	if (pm_isokendpt(endpt, &slot) != OK) {
		printf("PM: proc event reply from %d for invalid endpt %d\n",
		    who_e, endpt);
		return SUSPEND;
	}
	rmp = &mproc[slot];
	if (!(rmp->mp_flags & EVENT_CALL)) {
		printf("PM: proc event reply from %d for endpt %d, no event\n",
		    who_e, endpt);
		return SUSPEND;
	}
	if (rmp->mp_eventsub == NO_EVENTSUB ||
	    (unsigned int)rmp->mp_eventsub >= nsubs) {
		printf("PM: proc event reply from %d for endpt %d index %d\n",
		    who_e, endpt, rmp->mp_eventsub);
		return SUSPEND;
	}
	i = rmp->mp_eventsub;
	if (subs[i].endpt != who_e) {
		printf("PM: proc event reply for %d from %d instead of %d\n",
		    endpt, who_e, subs[i].endpt);
		return SUSPEND;
	}

	if (rmp->mp_flags & EXITING)
		event = PROC_EVENT_EXIT;
	else if (rmp->mp_flags & UNPAUSED)
		event = PROC_EVENT_SIGNAL;
	else {
		printf("PM: proc event reply from %d for %d, bad flags %x\n",
		    who_e, endpt, rmp->mp_flags);
		return SUSPEND;
	}
	if (m_in.m_pm_lsys_proc_event.event != event) {
		printf("PM: proc event reply from %d for %d for event %d "
		    "instead of %d\n", who_e, endpt,
		    m_in.m_pm_lsys_proc_event.event, event);
		return SUSPEND;
	}
	/*
	 * Do NOT check the event against the subscriber's event mask, since a
	 * service may have unsubscribed from an event while it has yet to
	 * process some leftover notifications for that event.  We could decide
	 * not to wait for the replies to those leftover notifications upon
	 * unsubscription, but that could result in problems upon quick
	 * resubscription, and such cases may in fact happen in practice.
	 */

	assert(subs[i].waiting > 0);
	subs[i].waiting--;

	/*
	 * If we are now no longer waiting for any replies from an already
	 * unsubscribed (but alive) service, remove it from the set now; this
	 * will also resume events for the current process.  In the normal case
	 * however, let the current process move on to the next subscriber if
	 * there are more, and the actual event otherwise.
	 */
	if (subs[i].mask == 0 && subs[i].waiting == 0) {
		remove_sub(i);
	} else {
		rmp->mp_eventsub++;

		resume_event(rmp);
	}

	/* In any case, do not reply to this reply message. */
	return SUSPEND;
}
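
/*
 * For reference, a subscriber-side sketch of the acknowledgment the handler
 * above expects (the PROC_EVENT_REPLY type name is an assumption; only the PM
 * side is defined in this file).  The reply must come from the subscriber's
 * own endpoint and echo the endpt and event fields of the original PROC_EVENT
 * message, or it is rejected with one of the warnings above:
 *
 *	m.m_type = PROC_EVENT_REPLY;	// m still holds endpt and event
 *	if (asynsend3(PM_PROC_NR, &m, AMF_NOREPLY) != OK)
 *		printf("service: unable to acknowledge process event\n");
 */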

/*
 * Publish a process event to interested subscribers.  The event is determined
 * from the process flags.  In addition, if the event is a process exit, also
 * check if it is a subscribing service that died.
 */
void
publish_event(struct mproc * rmp)
{
	unsigned int i;

	assert(nested == 0);
	assert((rmp->mp_flags & (IN_USE | EVENT_CALL)) == IN_USE);
	assert(rmp->mp_eventsub == NO_EVENTSUB);

	/*
	 * If a system service exited, we have to check if it was subscribed to
	 * process events.  If so, we have to remove it from the set and resume
	 * any processes blocked on an event call to that service.
	 */
	if ((rmp->mp_flags & (PRIV_PROC | EXITING)) == (PRIV_PROC | EXITING)) {
		for (i = 0; i < nsubs; i++) {
			if (subs[i].endpt == rmp->mp_endpoint) {
				/*
				 * If the wait count is nonzero, we may or may
				 * not get additional replies from this service
				 * later.  Those will be ignored.
				 */
				remove_sub(i);

				break;
			}
		}
	}

	/*
	 * Either send an event message to the first subscriber, or if there
	 * are no subscribers, resume processing the event right away.
	 */
	rmp->mp_flags |= EVENT_CALL;
	rmp->mp_eventsub = 0;

	resume_event(rmp);
}
354