#include "fs.h"
#include <string.h>
#include <assert.h>

static void *worker_main(void *arg);
static void worker_sleep(void);
static void worker_wake(struct worker_thread *worker);

static mthread_attr_t tattr;	/* thread creation attributes (stack size) */
static unsigned int pending;	/* number of processes with pending work */
static unsigned int busy;	/* number of workers assigned to a process */
static int block_all;		/* TRUE while new work may not be started */

#if defined(_MINIX_MAGIC)
# define TH_STACKSIZE (64 * 1024)
#elif defined(MKCOVERAGE)
# define TH_STACKSIZE (40 * 1024)
#else
# define TH_STACKSIZE (28 * 1024)
#endif

#define ASSERTW(w) assert((w) >= &workers[0] && (w) < &workers[NR_WTHREADS])
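
/*
 * Summary of the model in this file: worker_init() creates NR_WTHREADS
 * threads, each running worker_main(). worker_start() binds work to a
 * process (struct fproc) and either hands it to a free thread right away or
 * marks it pending; the 'busy' and 'pending' counters track those two
 * states, and worker_idle() holds when both are zero. One thread is normally
 * kept as a spare for deadlock resolution, and worker_cleanup() reverses
 * worker_init() so that a live update can restart the threads.
 */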

/*===========================================================================*
 *				worker_init				     *
 *===========================================================================*/
void worker_init(void)
{
/* Initialize worker threads */
  struct worker_thread *wp;
  int i;

  if (mthread_attr_init(&tattr) != 0)
	panic("failed to initialize attribute");
  if (mthread_attr_setstacksize(&tattr, TH_STACKSIZE) != 0)
	panic("couldn't set default thread stack size");

  pending = 0;
  busy = 0;
  block_all = FALSE;

  for (i = 0; i < NR_WTHREADS; i++) {
	wp = &workers[i];

	wp->w_fp = NULL;	/* Mark not in use */
	wp->w_next = NULL;
	wp->w_task = NONE;
	if (mutex_init(&wp->w_event_mutex, NULL) != 0)
		panic("failed to initialize mutex");
	if (cond_init(&wp->w_event, NULL) != 0)
		panic("failed to initialize condition variable");
	if (mthread_create(&wp->w_tid, &tattr, worker_main, (void *) wp) != 0)
		panic("unable to start thread");
  }

  /* Let all threads get ready to accept work. */
  worker_yield();
}

/*===========================================================================*
 *				worker_cleanup				     *
 *===========================================================================*/
void worker_cleanup(void)
{
/* Clean up worker threads, reversing the actions of worker_init() such that
 * we can safely call worker_init() again later. All worker threads are
 * expected to be idle already. Used for live updates, because transferring
 * the thread stacks from one version to another is currently not feasible.
 */
  struct worker_thread *wp;
  int i;

  assert(worker_idle());

  /* First terminate all threads. */
  for (i = 0; i < NR_WTHREADS; i++) {
	wp = &workers[i];

	assert(wp->w_fp == NULL);

	/* Waking up the thread with no w_fp will cause it to exit. */
	worker_wake(wp);
  }

  worker_yield();

  /* Then clean up their resources. */
  for (i = 0; i < NR_WTHREADS; i++) {
	wp = &workers[i];

	if (mthread_join(wp->w_tid, NULL) != 0)
		panic("worker_cleanup: could not join thread %d", i);
	if (cond_destroy(&wp->w_event) != 0)
		panic("failed to destroy condition variable");
	if (mutex_destroy(&wp->w_event_mutex) != 0)
		panic("failed to destroy mutex");
  }

  /* Finally, clean up global resources. */
  if (mthread_attr_destroy(&tattr) != 0)
	panic("failed to destroy attribute");

  memset(workers, 0, sizeof(workers));
}

/*===========================================================================*
 *				worker_idle				     *
 *===========================================================================*/
int worker_idle(void)
{
/* Return whether all worker threads are idle. */

  return (pending == 0 && busy == 0);
}

/*===========================================================================*
 *				worker_assign				     *
 *===========================================================================*/
static void worker_assign(struct fproc *rfp)
{
/* Assign the work for the given process to a free thread. The caller must
 * ensure that there is in fact at least one free thread.
 */
  struct worker_thread *worker;
  int i;

  /* Find a free worker thread. */
  for (i = 0; i < NR_WTHREADS; i++) {
	worker = &workers[i];

	if (worker->w_fp == NULL)
		break;
  }
  assert(i < NR_WTHREADS);	/* the caller guarantees a free thread */

  /* Assign work to it. */
  rfp->fp_worker = worker;
  worker->w_fp = rfp;
  busy++;

  worker_wake(worker);
}

/*===========================================================================*
 *			worker_may_do_pending				     *
 *===========================================================================*/
static int worker_may_do_pending(void)
{
/* Return whether there is a free thread that may do pending work. This is true
 * only if there is pending work at all, and there is a free non-spare thread
 * (the spare thread is never used for pending work), and VFS is currently
 * processing new requests at all (this may not be true during initialization).
 */

  /* Ordered by likelihood to be false. */
  return (pending > 0 && worker_available() > 1 && !block_all);
}

/*===========================================================================*
 *				worker_allow				     *
 *===========================================================================*/
void worker_allow(int allow)
{
/* Allow or disallow workers to process new work. If disallowed, any new work
 * will be stored as pending, even when there are free worker threads. There is
 * no facility to stop active workers. To be used only during initialization!
 */
  struct fproc *rfp;

  block_all = !allow;

  if (!worker_may_do_pending())
	return;

  /* Assign any pending work to workers. */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
	if (rfp->fp_flags & FP_PENDING) {
		rfp->fp_flags &= ~FP_PENDING;	/* No longer pending */
		assert(pending > 0);
		pending--;
		worker_assign(rfp);

		if (!worker_may_do_pending())
			return;
	}
  }
}
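
/*
 * Illustrative sketch only (the actual call sites live elsewhere in VFS):
 * during startup, new work can be held back and later released, e.g.
 *
 *	worker_allow(FALSE);	(all incoming work becomes pending)
 *	(finish initialization)
 *	worker_allow(TRUE);	(pending work is assigned to free workers)
 */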

/*===========================================================================*
 *				worker_get_work				     *
 *===========================================================================*/
static int worker_get_work(void)
{
/* Find new work to do. Work can be 'queued', 'pending', or absent. In the
 * latter case wait for new work to come in. Return TRUE if there is work to
 * do, or FALSE if the current thread is requested to shut down.
 */
  struct fproc *rfp;

  assert(self->w_fp == NULL);

  /* Is there pending work, and should we do it? */
  if (worker_may_do_pending()) {
	/* Find pending work */
	for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
		if (rfp->fp_flags & FP_PENDING) {
			self->w_fp = rfp;
			rfp->fp_worker = self;
			busy++;
			rfp->fp_flags &= ~FP_PENDING; /* No longer pending */
			assert(pending > 0);
			pending--;
			return TRUE;
		}
	}
	panic("Pending work inconsistency");
  }

  /* Wait for work to come to us */
  worker_sleep();

  return (self->w_fp != NULL);
}

/*===========================================================================*
 *				worker_available			     *
 *===========================================================================*/
int worker_available(void)
{
/* Return the number of threads that are available, including the spare thread.
 */

  return(NR_WTHREADS - busy);
}
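
/*
 * Note, derived from worker_try_activate() below: one of these available
 * threads is normally left unused as a spare for deadlock resolution; only
 * callers that pass use_spare may claim the very last one.
 */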

/*===========================================================================*
 *				worker_main				     *
 *===========================================================================*/
static void *worker_main(void *arg)
{
/* Worker thread main loop */

  self = (struct worker_thread *) arg;
  ASSERTW(self);

  while (worker_get_work()) {

	fp = self->w_fp;
	assert(fp->fp_worker == self);

	/* Lock the process. */
	lock_proc(fp);

	/* The following two blocks could be run in a loop until both
	 * conditions are no longer met, but it is currently impossible that
	 * more normal work is present after postponed PM work has been done.
	 */

	/* Perform normal work, if any. */
	if (fp->fp_func != NULL) {
		self->w_m_in = fp->fp_msg;
		err_code = OK;

		fp->fp_func();

		fp->fp_func = NULL;	/* deliberately unset AFTER the call */
	}

	/* Perform postponed PM work, if any. */
	if (fp->fp_flags & FP_PM_WORK) {
		self->w_m_in = fp->fp_pm_msg;

		service_pm_postponed();

		fp->fp_flags &= ~FP_PM_WORK;
	}

	/* Perform cleanup actions. */
	thread_cleanup();

	unlock_proc(fp);

	fp->fp_worker = NULL;
	self->w_fp = NULL;
	assert(busy > 0);
	busy--;
  }

  return(NULL);
}

/*===========================================================================*
 *				worker_can_start			     *
 *===========================================================================*/
int worker_can_start(struct fproc *rfp)
{
/* Return whether normal (non-PM) work can be started for the given process.
 * This function is used to serialize invocation of "special" procedures, and
 * is not entirely safe for other cases, as explained in the comments below.
 */
  int is_pending, is_active, has_normal_work;

  is_pending = (rfp->fp_flags & FP_PENDING);
  is_active = (rfp->fp_worker != NULL);
  has_normal_work = (rfp->fp_func != NULL);

  /* If there is no work scheduled for the process, we can start work. */
  if (!is_pending && !is_active) return TRUE;

  /* If there is already normal work scheduled for the process, we cannot add
   * more, since we support only one normal job per process.
   */
  if (has_normal_work) return FALSE;

  /* If this process has pending PM work but no normal work, we can add the
   * normal work for execution before the worker will start.
   */
  if (is_pending) return TRUE;

  /* However, if a worker is active for PM work, we cannot add normal work
   * either, because the work will not be considered. For this reason, we can
   * not use this function for processes that can possibly get postponed PM
   * work. It is still safe for core system processes, though.
   */
  return FALSE;
}
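
/*
 * Illustrative sketch only (the handler name and message are hypothetical):
 * a caller serializing such a "special" procedure would pair this check with
 * worker_start() below, e.g.
 *
 *	if (!worker_can_start(rfp))
 *		return(EAGAIN);
 *	worker_start(rfp, do_special, &m_in, FALSE);
 *
 * so that at most one normal job is ever scheduled per process.
 */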

/*===========================================================================*
 *				worker_try_activate			     *
 *===========================================================================*/
static void worker_try_activate(struct fproc *rfp, int use_spare)
{
/* See if we can wake up a thread to do the work scheduled for the given
 * process. If not, mark the process as having pending work for later.
 */
  int needed;

  /* Use the last available thread only if requested. Otherwise, leave at least
   * one spare thread for deadlock resolution.
   */
  needed = use_spare ? 1 : 2;

  /* Also make sure that doing new work is allowed at all right now, which may
   * not be the case during VFS initialization. We do always allow callback
   * calls, i.e., calls that may use the spare thread. The reason is that we do
   * not support callback calls being marked as pending, so the (entirely
   * theoretical) exception here may (entirely theoretically) avoid deadlocks.
   */
  if (needed <= worker_available() && (!block_all || use_spare)) {
	worker_assign(rfp);
  } else {
	rfp->fp_flags |= FP_PENDING;
	pending++;
  }
}

/*===========================================================================*
 *				worker_start				     *
 *===========================================================================*/
void worker_start(struct fproc *rfp, void (*func)(void), message *m_ptr,
	int use_spare)
{
/* Schedule work to be done by a worker thread. The work is bound to the given
 * process. If a function pointer is given, the work is considered normal work,
 * and the function will be called to handle it. If the function pointer is
 * NULL, the work is considered postponed PM work, and service_pm_postponed
 * will be called to handle it. The input message will be a copy of the given
 * message. Optionally, the last spare (deadlock-resolving) thread may be used
 * to execute the work immediately.
 */
  int is_pm_work, is_pending, is_active, has_normal_work, has_pm_work;

  assert(rfp != NULL);

  is_pm_work = (func == NULL);
  is_pending = (rfp->fp_flags & FP_PENDING);
  is_active = (rfp->fp_worker != NULL);
  has_normal_work = (rfp->fp_func != NULL);
  has_pm_work = (rfp->fp_flags & FP_PM_WORK);

  /* Sanity checks. If any of these trigger, someone messed up badly! */
  if (is_pending || is_active) {
	if (is_pending && is_active)
		panic("work cannot be both pending and active");

	/* The process cannot make more than one call at once. */
	if (!is_pm_work && has_normal_work)
		panic("process has two calls (%x, %x)",
			rfp->fp_msg.m_type, m_ptr->m_type);

	/* PM will not send more than one job per process to us at once. */
	if (is_pm_work && has_pm_work)
		panic("got two calls from PM (%x, %x)",
			rfp->fp_pm_msg.m_type, m_ptr->m_type);

	/* Despite PM's sys_delay_stop() system, it is possible that normal
	 * work (in particular, do_pending_pipe) arrives after postponed PM
	 * work has been scheduled for execution, so we don't check for that.
	 */
#if 0
	printf("VFS: adding %s work to %s thread\n",
		is_pm_work ? "PM" : "normal",
		is_pending ? "pending" : "active");
#endif
  } else {
	/* Some cleanup step forgotten somewhere? */
	if (has_normal_work || has_pm_work)
		panic("worker administration error");
  }

  /* Save the work to be performed. */
  if (!is_pm_work) {
	rfp->fp_msg = *m_ptr;
	rfp->fp_func = func;
  } else {
	rfp->fp_pm_msg = *m_ptr;
	rfp->fp_flags |= FP_PM_WORK;
  }

  /* If we have not only added to existing work, go look for a free thread.
   * Note that we won't be using the spare thread for normal work if there is
   * already PM work pending, but that situation will never occur in practice.
   */
  if (!is_pending && !is_active)
	worker_try_activate(rfp, use_spare);
}

/*===========================================================================*
 *				worker_yield				     *
 *===========================================================================*/
void worker_yield(void)
{
/* Yield to all worker threads. To be called from the main thread only. */

  mthread_yield_all();

  self = NULL;
}
439728b0e5bSDavid van Moolenbroek
440728b0e5bSDavid van Moolenbroek /*===========================================================================*
441433d6423SLionel Sambuc * worker_sleep *
442433d6423SLionel Sambuc *===========================================================================*/
worker_sleep(void)443433d6423SLionel Sambuc static void worker_sleep(void)
444433d6423SLionel Sambuc {
445433d6423SLionel Sambuc struct worker_thread *worker = self;
446433d6423SLionel Sambuc ASSERTW(worker);
447433d6423SLionel Sambuc if (mutex_lock(&worker->w_event_mutex) != 0)
448433d6423SLionel Sambuc panic("unable to lock event mutex");
449433d6423SLionel Sambuc if (cond_wait(&worker->w_event, &worker->w_event_mutex) != 0)
450433d6423SLionel Sambuc panic("could not wait on conditional variable");
451433d6423SLionel Sambuc if (mutex_unlock(&worker->w_event_mutex) != 0)
452433d6423SLionel Sambuc panic("unable to unlock event mutex");
453433d6423SLionel Sambuc self = worker;
454433d6423SLionel Sambuc }

/*===========================================================================*
 *				worker_wake				     *
 *===========================================================================*/
static void worker_wake(struct worker_thread *worker)
{
/* Signal a worker to wake up */
  ASSERTW(worker);
  if (mutex_lock(&worker->w_event_mutex) != 0)
	panic("unable to lock event mutex");
  if (cond_signal(&worker->w_event) != 0)
	panic("unable to signal condition variable");
  if (mutex_unlock(&worker->w_event_mutex) != 0)
	panic("unable to unlock event mutex");
}

/*===========================================================================*
 *				worker_suspend				     *
 *===========================================================================*/
struct worker_thread *worker_suspend(void)
{
/* Suspend the current thread, saving certain thread variables. Return a
 * pointer to the thread's worker structure for later resumption.
 */

  ASSERTW(self);
  assert(fp != NULL);
  assert(self->w_fp == fp);
  assert(fp->fp_worker == self);

  self->w_err_code = err_code;

  return self;
}

/*===========================================================================*
 *				worker_resume				     *
 *===========================================================================*/
void worker_resume(struct worker_thread *org_self)
{
/* Resume the current thread after suspension, restoring thread variables. */

  ASSERTW(org_self);

  self = org_self;

  fp = self->w_fp;
  assert(fp != NULL);

  err_code = self->w_err_code;
}

/*===========================================================================*
 *				worker_wait				     *
 *===========================================================================*/
void worker_wait(void)
{
/* Put the current thread to sleep until woken up by the main thread. */

  (void) worker_suspend(); /* worker_sleep already saves and restores 'self' */

  worker_sleep();

  /* We continue here after waking up */
  worker_resume(self);
  assert(self->w_next == NULL);
}

/*===========================================================================*
 *				worker_signal				     *
 *===========================================================================*/
void worker_signal(struct worker_thread *worker)
{
  ASSERTW(worker);		/* Make sure we have a valid thread */
  worker_wake(worker);
}

/*===========================================================================*
 *				worker_stop				     *
 *===========================================================================*/
void worker_stop(struct worker_thread *worker)
{
  ASSERTW(worker);		/* Make sure we have a valid thread */
  /* This thread is communicating with a driver or file server */
  if (worker->w_drv_sendrec != NULL) {		/* Driver */
	assert(worker->w_task != NONE);
	worker->w_drv_sendrec->m_type = EIO;
	worker->w_drv_sendrec = NULL;
  } else if (worker->w_sendrec != NULL) {	/* FS */
	/* worker->w_task may be NONE if the FS message was still queued */
	worker->w_sendrec->m_type = EIO;
	worker->w_sendrec = NULL;
  } else
	panic("reply storage consistency error");	/* Oh dear */
  worker_wake(worker);
}

/*===========================================================================*
 *			worker_stop_by_endpt				     *
 *===========================================================================*/
void worker_stop_by_endpt(endpoint_t proc_e)
{
/* Stop all workers that are waiting for a reply from the given endpoint,
 * making each of them fail the ongoing communication with EIO.
 */
  struct worker_thread *worker;
  int i;

  if (proc_e == NONE) return;

  for (i = 0; i < NR_WTHREADS; i++) {
	worker = &workers[i];
	if (worker->w_fp != NULL && worker->w_task == proc_e)
		worker_stop(worker);
  }
}

/*===========================================================================*
 *				worker_get				     *
 *===========================================================================*/
struct worker_thread *worker_get(thread_t worker_tid)
{
/* Return the worker structure for the given thread ID, or NULL if no worker
 * thread has that ID.
 */
  int i;

  for (i = 0; i < NR_WTHREADS; i++)
	if (workers[i].w_tid == worker_tid)
		return(&workers[i]);

  return(NULL);
}

/*===========================================================================*
 *				worker_set_proc				     *
 *===========================================================================*/
void worker_set_proc(struct fproc *rfp)
{
/* Perform an incredibly ugly action that completely violates the threading
 * model: change the current working thread's process context to another
 * process. The caller is expected to hold the lock to both the calling and the
 * target process, and neither process is expected to continue regular
 * operation when done. This code is here *only* and *strictly* for the reboot
 * code, and *must not* be used for anything else.
 */

  if (fp == rfp) return;

  if (rfp->fp_worker != NULL)
	panic("worker_set_proc: target process not idle");

  fp->fp_worker = NULL;

  fp = rfp;

  self->w_fp = rfp;
  fp->fp_worker = self;
}