/* xref: /minix3/minix/kernel/smp.c (revision 433d6423c39e34ec4b79c950597bb2d236f886be) */
#include <assert.h>

#include "smp.h"
#include "interrupt.h"
#include "clock.h"

unsigned ncpus;
unsigned ht_per_core;
unsigned bsp_cpu_id;

struct cpu cpus[CONFIG_MAX_CPUS];

/* info passed to another cpu along with a sched ipi */
struct sched_ipi_data {
	volatile u32_t	flags;
	volatile u32_t	data;
};

static struct sched_ipi_data  sched_ipi_data[CONFIG_MAX_CPUS];

#define SCHED_IPI_STOP_PROC	1
#define SCHED_IPI_VM_INHIBIT	2
#define SCHED_IPI_SAVE_CTX	4
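
/*
 * Protocol sketch (derived from smp_schedule_sync() and smp_sched_handler()
 * below): the sender stores the target process in sched_ipi_data[cpu].data,
 * ORs the requested task bits into .flags and sends a schedule IPI; the
 * receiver carries out the tasks and then clears .flags, which is the
 * acknowledgement the sender spins on.
 */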

static volatile unsigned ap_cpus_booted;

SPINLOCK_DEFINE(big_kernel_lock)
SPINLOCK_DEFINE(boot_lock)
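
/*
 * Note: the BKL (big_kernel_lock) serializes kernel-mode execution across
 * CPUs; boot_lock is only defined here and is assumed to be taken by the
 * architecture-specific code while the APs are started.
 */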

void wait_for_APs_to_finish_booting(void)
{
	unsigned n = 0;
	int i;

	/* check how many cpus are actually alive */
	for (i = 0 ; i < ncpus ; i++) {
		if (cpu_test_flag(i, CPU_IS_READY))
			n++;
	}
	if (n != ncpus)
		printf("WARNING only %u out of %u cpus booted\n", n, ncpus);

	/* we must let the other CPUs run in kernel mode first */
	BKL_UNLOCK();
	while (ap_cpus_booted != (n - 1))
		arch_pause();
	/* now we have to take the lock again as we continue execution */
	BKL_LOCK();
}

void ap_boot_finished(unsigned cpu)
{
	ap_cpus_booted++;
}

void smp_ipi_halt_handler(void)
{
	ipi_ack();
	stop_local_timer();
	arch_smp_halt_cpu();
}

void smp_schedule(unsigned cpu)
{
	arch_send_smp_schedule_ipi(cpu);
}

void smp_sched_handler(void);

/*
 * Tell another cpu about a task to do and return only after the cpu
 * acknowledges that the task is finished.  Also wait until the target cpu
 * finishes any task already sent to it by another cpu before submitting a
 * new one.
 */
static void smp_schedule_sync(struct proc * p, unsigned task)
{
	unsigned cpu = p->p_cpu;
	unsigned mycpu = cpuid;

	assert(cpu != mycpu);
	/*
	 * if some other cpu made a request to the same cpu, wait until it is
	 * done before proceeding
	 */
	if (sched_ipi_data[cpu].flags != 0) {
		BKL_UNLOCK();
		while (sched_ipi_data[cpu].flags != 0) {
			/* serve requests sent to this cpu meanwhile so two
			 * cpus signalling each other cannot deadlock */
			if (sched_ipi_data[mycpu].flags) {
				BKL_LOCK();
				smp_sched_handler();
				BKL_UNLOCK();
			}
		}
		BKL_LOCK();
	}

	sched_ipi_data[cpu].data = (u32_t) p;
	sched_ipi_data[cpu].flags |= task;
	__insn_barrier();
	arch_send_smp_schedule_ipi(cpu);

	/* wait until the destination cpu finishes its job */
	BKL_UNLOCK();
	while (sched_ipi_data[cpu].flags != 0) {
		if (sched_ipi_data[mycpu].flags) {
			BKL_LOCK();
			smp_sched_handler();
			BKL_UNLOCK();
		}
	}
	BKL_LOCK();
}
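
/*
 * Illustrative timeline of the handshake above (hypothetical CPUs 0 and 1,
 * with CPU 1 currently running p):
 *
 *   CPU 0                                  CPU 1
 *   sched_ipi_data[1].data  = p;
 *   sched_ipi_data[1].flags |= task;
 *   arch_send_smp_schedule_ipi(1);
 *   BKL_UNLOCK(); spin on .flags   ---->   smp_sched_handler(): perform
 *                                          task(s), .flags = 0 (the ack)
 *   .flags == 0; BKL_LOCK(); return
 *
 * While spinning, CPU 0 also services requests aimed at itself so that two
 * CPUs signalling each other at the same time cannot deadlock.
 */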

void smp_schedule_stop_proc(struct proc * p)
{
	if (proc_is_runnable(p))
		smp_schedule_sync(p, SCHED_IPI_STOP_PROC);
	else
		RTS_SET(p, RTS_PROC_STOP);
	assert(RTS_ISSET(p, RTS_PROC_STOP));
}

void smp_schedule_vminhibit(struct proc * p)
{
	if (proc_is_runnable(p))
		smp_schedule_sync(p, SCHED_IPI_VM_INHIBIT);
	else
		RTS_SET(p, RTS_VMINHIBIT);
	assert(RTS_ISSET(p, RTS_VMINHIBIT));
}

void smp_schedule_stop_proc_save_ctx(struct proc * p)
{
	/*
	 * stop the process and force its complete context (i.e. including
	 * FPU state and such) to be saved
	 */
	smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
	assert(RTS_ISSET(p, RTS_PROC_STOP));
}

void smp_schedule_migrate_proc(struct proc * p, unsigned dest_cpu)
{
	/*
	 * stop the process and force its complete context (i.e. including
	 * FPU state and such) to be saved, so it can resume elsewhere
	 */
	smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
	assert(RTS_ISSET(p, RTS_PROC_STOP));

	/* assign the new cpu and let the process run again */
	p->p_cpu = dest_cpu;
	RTS_UNSET(p, RTS_PROC_STOP);
}

void smp_sched_handler(void)
{
	unsigned flgs;
	unsigned cpu = cpuid;

	flgs = sched_ipi_data[cpu].flags;

	if (flgs) {
		struct proc * p;
		p = (struct proc *)sched_ipi_data[cpu].data;

		if (flgs & SCHED_IPI_STOP_PROC) {
			RTS_SET(p, RTS_PROC_STOP);
		}
		if (flgs & SCHED_IPI_SAVE_CTX) {
			/* all context has been saved already, FPU remains */
			if (proc_used_fpu(p) &&
					get_cpulocal_var(fpu_owner) == p) {
				disable_fpu_exception();
				save_local_fpu(p, FALSE /*retain*/);
				/* we're preparing to migrate somewhere else */
				release_fpu(p);
			}
		}
		if (flgs & SCHED_IPI_VM_INHIBIT) {
			RTS_SET(p, RTS_VMINHIBIT);
		}
	}

	__insn_barrier();
	/* clearing the flags acknowledges the request to the sender */
	sched_ipi_data[cpu].flags = 0;
}

/*
 * This function is always called after smp_sched_handler() has already been
 * called.  It only serves the purpose of acknowledging the IPI and preempting
 * the current process if the CPU was not idle.
 */
void smp_ipi_sched_handler(void)
{
	struct proc * curr;

	ipi_ack();

	curr = get_cpulocal_var(proc_ptr);
	if (curr->p_endpoint != IDLE) {
		RTS_SET(curr, RTS_PREEMPTED);
	}
}