/*	$NetBSD: multicpu.c,v 1.37 2022/03/03 06:28:26 riastradh Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * CPU-type independent code to spin up other VAX CPUs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: multicpu.c,v 1.37 2022/03/03 06:28:26 riastradh Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <uvm/uvm_extern.h>

#include <vax/vax/gencons.h>

#include "ioconf.h"

const struct cpu_mp_dep *mp_dep_call;

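/*
 * Each CPU-type backend supplies its machine-dependent MP operations
 * through mp_dep_call and is expected to register its table before the
 * secondary CPUs are started.  A minimal sketch, with hypothetical
 * "myvax" names; only the hooks this file actually calls are shown:
 */
#if 0
static const struct cpu_mp_dep myvax_mp_dep = {
	.cpu_startslave	= myvax_startslave,	/* kick a halted CPU */
	.cpu_send_ipi	= myvax_send_ipi,	/* raise an IPI on a CPU */
	.cpu_cnintr	= myvax_cnintr,		/* console char handling */
};

mp_dep_call = &myvax_mp_dep;	/* done from the backend's attach code */
#endif

/*
 * CPUs found during autoconfiguration are queued here until
 * cpu_boot_secondary_processors() starts them.
 */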
struct cpuq {
	SIMPLEQ_ENTRY(cpuq) cq_q;
	struct cpu_info *cq_ci;
	device_t cq_dev;
};

SIMPLEQ_HEAD(, cpuq) cpuq = SIMPLEQ_HEAD_INITIALIZER(cpuq);

extern long avail_start, avail_end;
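/* List of all attached CPUs, threaded through ci_next. */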
struct cpu_info_qh cpus = SIMPLEQ_HEAD_INITIALIZER(cpus);

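/*
 * Start every secondary CPU queued by cpu_slavesetup(); called once by
 * machine-independent code late in boot.
 */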
void
cpu_boot_secondary_processors(void)
{
	struct cpuq *q;

	while ((q = SIMPLEQ_FIRST(&cpuq))) {
		SIMPLEQ_REMOVE_HEAD(&cpuq, cq_q);
		(*mp_dep_call->cpu_startslave)(q->cq_ci);
		kmem_free(q, sizeof(*q));
	}
}

/*
 * Allocate a cpu_info struct, fill it in, and prepare the CPU to be
 * started by cpu_boot_secondary_processors().
 */
void
cpu_slavesetup(device_t self, int slotid)
{
	struct cpu_info *ci;
	struct cpuq *cq;
	struct vm_page *pg;
	vaddr_t istackbase;

	KASSERT(device_private(self) == NULL);

	ci = kmem_zalloc(sizeof(*ci), KM_SLEEP);
	device_set_private(self, ci);
	ci->ci_dev = self;
	ci->ci_slotid = slotid;
	ci->ci_cpuid = device_unit(self);

	/* Allocate an interrupt stack */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL)
		panic("cpu_slavesetup2");

	istackbase = VM_PAGE_TO_PHYS(pg) | KERNBASE;
	kvtopte(istackbase)->pg_v = 0; /* istack safety belt */

	/* Populate the PCB and the cpu_info struct */
	ci->ci_istack = istackbase + PAGE_SIZE;
	SIMPLEQ_INSERT_TAIL(&cpus, ci, ci_next);

	cq = kmem_zalloc(sizeof(*cq), KM_SLEEP);
	cq->cq_ci = ci;
	cq->cq_dev = ci->ci_dev;
	SIMPLEQ_INSERT_TAIL(&cpuq, cq, cq_q);

	mi_cpu_attach(ci);	/* let the MI parts know about the new cpu */
}
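
/*
 * Example (a sketch with hypothetical names): a CPU-type attach routine
 * would call cpu_slavesetup() for every CPU except the one we booted on,
 * queueing it for cpu_boot_secondary_processors() above:
 */
#if 0
	if (slotid != master_slotid)	/* not the boot CPU */
		cpu_slavesetup(self, slotid);
#endif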

/*
 * Send an IPI of type 'type' to the CPU with logical device number 'cpu'.
 * A negative 'cpu' selects a group: IPI_DEST_MASTER sends to the master
 * CPU only, IPI_DEST_ALL to every CPU except the sender.
 */
void
cpu_send_ipi(int cpu, int type)
{
	struct cpu_info *ci;
	int i;

	if (cpu >= 0) {
		ci = device_lookup_private(&cpu_cd, cpu);
		bbssi(type, &ci->ci_ipimsgs);
		(*mp_dep_call->cpu_send_ipi)(ci);
		return;
	}

	for (i = 0; i < cpu_cd.cd_ndevs; i++) {
		ci = device_lookup_private(&cpu_cd, i);
		if (ci == NULL)
			continue;
		switch (cpu) {
		case IPI_DEST_MASTER:
			if (ci->ci_flags & CI_MASTERCPU) {
				bbssi(type, &ci->ci_ipimsgs);
				(*mp_dep_call->cpu_send_ipi)(ci);
			}
			break;
		case IPI_DEST_ALL:
			if (i == cpu_number())
				continue;	/* No IPI to myself */
			bbssi(type, &ci->ci_ipimsgs);
			(*mp_dep_call->cpu_send_ipi)(ci);
			break;
		}
	}
}

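/*
 * Example (a sketch): invalidate the TLB on all other CPUs and then on
 * this one; IPI_TBIA is handled below with mtpr(0, PR_TBIA).
 */
#if 0
	cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);	/* everyone else */
	mtpr(0, PR_TBIA);			/* ...and ourselves */
#endif

/*
 * Drain and dispatch the pending IPI messages on the current CPU.  The
 * CPU-type backend is expected to call this from its IPI interrupt
 * handler.
 */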
void
cpu_handle_ipi(void)
{
	struct cpu_info * const ci = curcpu();
	int bitno;
	int s;

	s = splhigh();

	while ((bitno = ffs(ci->ci_ipimsgs))) {
		bitno -= 1; /* ffs() starts from 1 */
		bbcci(bitno, &ci->ci_ipimsgs);
		switch (bitno) {
		case IPI_START_CNTX:
#ifdef DIAGNOSTIC
			if (CPU_IS_PRIMARY(ci) == 0)
				panic("cpu_handle_ipi");
#endif
			gencnstarttx();
			break;
		case IPI_SEND_CNCHAR:
#ifdef DIAGNOSTIC
			if (CPU_IS_PRIMARY(ci) == 0)
				panic("cpu_handle_ipi2");
#endif
			(*mp_dep_call->cpu_cnintr)();
			break;
		case IPI_RUNNING:
			break;
		case IPI_TBIA:
			mtpr(0, PR_TBIA);
			break;
		case IPI_DDB:
			Debugger();
			break;
		case IPI_XCALL:
			xc_ipi_handler();
			break;
		case IPI_GENERIC:
			ipi_cpu_handler();
			break;
		default:
			panic("cpu_handle_ipi: bad bit %x", bitno);
		}
	}
	splx(s);
}

/*
 * MD support for the xcall(9) interface.
 */

void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	if (ci) {
		/* Unicast: remote CPU. */
		cpu_send_ipi(ci->ci_cpuid, IPI_XCALL);
	} else {
		/* Broadcast: all, but local CPU (caller will handle it). */
		cpu_send_ipi(IPI_DEST_ALL, IPI_XCALL);
	}
}

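/*
 * Example (a sketch): MI code reaches xc_send_ipi() through the standard
 * xcall(9) calls; 'some_func', 'arg1' and 'arg2' are hypothetical:
 */
#if 0
	uint64_t where = xc_broadcast(0, some_func, arg1, arg2);
	xc_wait(where);
#endif

/*
 * MD hook for the generic ipi(9) interface; the matching receive side is
 * the ipi_cpu_handler() call in cpu_handle_ipi() above.
 */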
void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	if (ci) {
		/* Unicast: remote CPU. */
		cpu_send_ipi(ci->ci_cpuid, IPI_GENERIC);
	} else {
		/* Broadcast: all, but local CPU (caller will handle it). */
		cpu_send_ipi(IPI_DEST_ALL, IPI_GENERIC);
	}
}
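
/*
 * Example (a sketch): how MI code would trigger cpu_ipi() via ipi(9);
 * 'some_func' and 'some_arg' are hypothetical:
 */
#if 0
	static ipi_msg_t msg = { .func = some_func, .arg = some_arg };

	kpreempt_disable();
	ipi_unicast(&msg, ci);	/* ends up in cpu_ipi() above */
	kpreempt_enable();
	ipi_wait(&msg);
#endif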
233