#include <minix/mthread.h>
#include "global.h"
#include "proto.h"

#define MAIN_CTX &(mainthread.m_context)
#define MAIN_STATE mainthread.m_state
#define OLD_CTX &(threads[old_thread]->m_context)
#define CURRENT_CTX &(threads[current_thread]->m_context)
#define CURRENT_STATE threads[current_thread]->m_state
static int yield_all;

/*===========================================================================*
 *                            mthread_getcontext                             *
 *===========================================================================*/
int mthread_getcontext(ctx)
ucontext_t *ctx;
{
  /* Retrieve this process' current state. */

  /* We're not interested in FPU state or signals, so ignore them.
   * As a side effect, this also significantly improves performance.
   */
  ctx->uc_flags |= _UC_IGNSIGM | _UC_IGNFPU;
  return getcontext(ctx);
}


/*===========================================================================*
 *                             mthread_schedule                              *
 *===========================================================================*/
void mthread_schedule(void)
{
  /* Pick a new thread to run and run it. In practice, this involves taking
   * the first thread off the (FIFO) run queue and resuming that thread.
   */

  mthread_thread_t old_thread;
  mthread_tcb_t *new_tcb, *old_tcb;
  ucontext_t *new_ctx, *old_ctx;

  old_thread = current_thread;

  if (mthread_queue_isempty(&run_queue)) {
    /* No runnable threads. Let the main thread run. */

    /* We keep track of whether we're running the program's 'main' thread or
     * a spawned thread. If we're already running the main thread and there
     * are no runnable threads, we can't jump back to its context. Instead,
     * we simply return.
     */
    if (running_main_thread) return;

    /* We're running the last runnable spawned thread. Return to the main
     * thread, as there is no work left.
     */
    current_thread = MAIN_THREAD;
  } else {
    current_thread = mthread_queue_remove(&run_queue);
  }

  /* Find the thread entries in the TCB... */
  new_tcb = mthread_find_tcb(current_thread);
  old_tcb = mthread_find_tcb(old_thread);

  /* ...and subsequently their contexts. */
  new_ctx = &(new_tcb->m_context);
  old_ctx = &(old_tcb->m_context);

  /* Are we running the 'main' thread after the swap? */
  running_main_thread = (current_thread == MAIN_THREAD);

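  /* Swap to the new thread's context: swapcontext() saves the current
   * context in old_ctx and resumes new_ctx. Control returns here only when
   * this thread is scheduled to run again.
   */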
  if (swapcontext(old_ctx, new_ctx) == -1)
    mthread_panic("Could not swap context");

}


/*===========================================================================*
 *                          mthread_init_scheduler                           *
 *===========================================================================*/
void mthread_init_scheduler(void)
{
  /* Initialize the scheduler */
  mthread_queue_init(&run_queue);
  yield_all = 0;

}


/*===========================================================================*
 *                             mthread_suspend                               *
 *===========================================================================*/
void mthread_suspend(state)
mthread_state_t state;
{
  /* Stop the current thread from running. There can be multiple reasons for
   * this: the thread tries to lock a locked mutex (i.e., it has to wait for
   * the mutex to become unlocked), the thread has to wait for a condition,
   * the thread volunteered to let another thread run (i.e., it called yield
   * and remains runnable itself), or the thread is dead.
   */

  int continue_thread = 0;
  mthread_tcb_t *tcb;
  ucontext_t *ctx;

  if (state == MS_DEAD) mthread_panic("Shouldn't suspend with MS_DEAD state");
  tcb = mthread_find_tcb(current_thread);
  tcb->m_state = state;
  ctx = &(tcb->m_context);

  /* Save the current thread's context */
  if (mthread_getcontext(ctx) != 0)
    mthread_panic("Couldn't save current thread's context");

  /* We return execution here through setcontext/swapcontext, but also when
   * we simply return from the getcontext call above. If continue_thread is
   * non-zero, we are continuing the execution of this thread after a call to
   * setcontext or swapcontext.
   */

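  /* First pass through this code: mark that any later return to the saved
   * context is a resumption, then hand the CPU to another thread.
   */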
  if (!continue_thread) {
    continue_thread = 1;
    mthread_schedule();	/* Let another thread run. */
  }
}


/*===========================================================================*
 *                            mthread_unsuspend                              *
 *===========================================================================*/
void mthread_unsuspend(thread)
mthread_thread_t thread;	/* Thread to make runnable */
{
  /* Mark the thread's state as runnable and add it to the run queue. */
  mthread_tcb_t *tcb;

  if (!isokthreadid(thread)) mthread_panic("Invalid thread id");

  tcb = mthread_find_tcb(thread);
  tcb->m_state = MS_RUNNABLE;
  mthread_queue_add(&run_queue, thread);
}


/*===========================================================================*
 *                              mthread_yield                                *
 *===========================================================================*/
int mthread_yield(void)
{
  /* Defer further execution of the current thread and let another thread
   * run.
   */
  mthread_tcb_t *tcb;
  mthread_thread_t t;

  /* Detached threads cannot clean themselves up; this is a good moment to do
   * it.
   */
  for (t = (mthread_thread_t) 0; need_reset > 0 && t < no_threads; t++) {
    tcb = mthread_find_tcb(t);
    if (tcb->m_state == MS_NEEDRESET) {
      mthread_thread_reset(t);
      used_threads--;
      need_reset--;
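      /* Return the slot to the pool of free threads so it can be reused. */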
      mthread_queue_add(&free_threads, t);
    }
  }

  if (mthread_queue_isempty(&run_queue)) {	/* No point in yielding. */
    return(-1);
  } else if (current_thread == NO_THREAD) {
    /* Can't yield this thread */
    return(-1);
  }

  mthread_queue_add(&run_queue, current_thread);
  mthread_suspend(MS_RUNNABLE);	/* We're still runnable, but we're just kind
				 * enough to let someone else run.
				 */
  return(0);
}


/*===========================================================================*
 *                            mthread_yield_all                              *
 *===========================================================================*/
void mthread_yield_all(void)
{
  /* Yield until there are no more runnable threads left. Two threads calling
   * this function will lead to a deadlock.
   */

  if (yield_all) mthread_panic("Deadlock: two threads trying to yield_all");
  yield_all = 1;

  /* This works as follows. Thread A is running and threads B, C, and D are
   * runnable. As A is running, it is NOT on the run_queue (see
   * mthread_schedule). It calls mthread_yield and will be added to the run
   * queue, allowing B to run. B runs and suspends eventually, possibly still
   * in a runnable state. Then C and D run. Eventually A will run again (and
   * is thus not on the list). If B, C, and D are dead, waiting for a
   * condition, or waiting for a lock, they are not on the run queue either.
   * At that point A is the only runnable thread left.
   */
  while (!mthread_queue_isempty(&run_queue)) {
    (void) mthread_yield();
  }

  /* Done yielding all threads. */
  yield_all = 0;
}

/* pthread compatibility layer. */
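/* A program that uses the standard names can call sched_yield() or
 * pthread_yield(); through the weak aliases below, those calls resolve to
 * mthread_yield(), and pthread_yield_all() resolves to mthread_yield_all().
 */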
__weak_alias(pthread_yield, mthread_yield)
__weak_alias(sched_yield, mthread_yield)
__weak_alias(pthread_yield_all, mthread_yield_all)