/*	$NetBSD: subr_pcu.c,v 1.28 2023/04/09 09:18:09 riastradh Exp $	*/

/*-
 * Copyright (c) 2011, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Per CPU Unit (PCU) is an interface for managing the synchronization of
 * any per-CPU context (unit) tied to an LWP context.  Typical use: FPU state.
 *
 * Concurrency notes:
 *
 *	PCU state may be loaded only by the current LWP, that is, curlwp.
 *	Therefore, only the LWP itself can set a CPU in lwp_t::l_pcu_cpu[id].
 *
 *	There are some important rules about operation calls.  A request
 *	for a PCU release can come from a) the owner LWP (regardless of
 *	whether the PCU state is on the current CPU or a remote CPU) or
 *	b) any other LWP running on that CPU (in which case the owner LWP
 *	is on a remote CPU or sleeping).
 *
 *	In any case, the PCU state can *only* be changed from the current
 *	CPU.  If said PCU state is on a remote CPU, a cross-call will be
 *	sent by the owner LWP.  Therefore struct cpu_info::ci_pcu_curlwp[id]
 *	may only be changed by the current CPU and lwp_t::l_pcu_cpu[id] may
 *	only be cleared by the CPU which has the PCU state loaded.
 */
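
/*
 * Illustrative sketch (not part of this file): the machine-dependent code
 * provides one pcu_ops_t per unit and registers it in pcu_ops_md_defs[].
 * The unit and callback names below are hypothetical and only show the
 * expected shape; the real definitions live in the MD code (e.g. the FPU
 * support):
 *
 *	static const pcu_ops_t fpu_ops = {
 *		.pcu_id = PCU_FPU,
 *		.pcu_state_save = fpu_state_save,
 *		.pcu_state_load = fpu_state_load,
 *		.pcu_state_release = fpu_state_release,
 *	};
 *
 *	const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
 *		[PCU_FPU] = &fpu_ops,
 *	};
 */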

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pcu.c,v 1.28 2023/04/09 09:18:09 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/lwp.h>
#include <sys/pcu.h>
#include <sys/ipi.h>

#if PCU_UNIT_COUNT > 0

static inline void pcu_do_op(const pcu_ops_t *, lwp_t * const, const int);
static void pcu_lwp_op(const pcu_ops_t *, lwp_t *, const int);

/*
 * Internal PCU commands for the pcu_do_op() function.
 */
#define	PCU_CMD_SAVE		0x01	/* save PCU state to the LWP */
#define	PCU_CMD_RELEASE		0x02	/* release PCU state on the CPU */

/*
 * Message structure for another CPU passed via ipi(9).
 */
typedef struct {
	const pcu_ops_t *pcu;
	lwp_t *		owner;
	const int	flags;
} pcu_ipi_msg_t;

/*
 * PCU IPIs run at IPL_HIGH (aka IPL_PCU in this code).
 */
#define	splpcu		splhigh

/*
 * pcu_available_p: true if lwp is allowed to use PCU state.
 */
static inline bool __diagused
pcu_available_p(struct lwp *l)
{

	/* XXX Not sure this is safe unless l is locked! */
	return (l->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) != LW_SYSTEM;
}

/*
 * pcu_switchpoint: release PCU state if the LWP is being run on another CPU.
 * This routine is called on each context switch by mi_switch().
 */
void
pcu_switchpoint(lwp_t *l)
{
	const uint32_t pcu_valid = l->l_pcu_valid;
	int s;

	KASSERTMSG(l == curlwp, "l %p != curlwp %p", l, curlwp);

	if (__predict_true(pcu_valid == 0)) {
		/* PCUs are not in use. */
		return;
	}
	s = splpcu();
	for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
		if ((pcu_valid & (1U << id)) == 0) {
			continue;
		}
		struct cpu_info * const pcu_ci = l->l_pcu_cpu[id];
		if (pcu_ci == l->l_cpu) {
			KASSERT(pcu_ci->ci_pcu_curlwp[id] == l);
			continue;
		}
		const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
		pcu->pcu_state_release(l);
	}
	splx(s);
}

/*
 * pcu_discard_all: discard PCU state of the given LWP.
 *
 * Used by exec and LWP exit.
 */
void
pcu_discard_all(lwp_t *l)
{
	const uint32_t pcu_valid = l->l_pcu_valid;

	/*
	 * The check for LSIDL here is to catch the case where the LWP exits
	 * due to an error in the LWP creation path before it ever runs.
	 */
	KASSERT(l == curlwp || l->l_stat == LSIDL ||
	    (!pcu_available_p(l) && pcu_valid == 0));

	if (__predict_true(pcu_valid == 0)) {
		/* PCUs are not in use. */
		return;
	}
	for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
		if ((pcu_valid & (1U << id)) == 0) {
			continue;
		}
		if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
			continue;
		}
		const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
		pcu_lwp_op(pcu, l, PCU_CMD_RELEASE);
	}
	l->l_pcu_valid = 0;
}

/*
 * pcu_save_all: save PCU state of the given LWP so that eg. coredump can
 * examine it.
 */
void
pcu_save_all(lwp_t *l)
{
	const uint32_t pcu_valid = l->l_pcu_valid;
	int flags = PCU_CMD_SAVE;

	/* If LW_WCORE, we are also releasing the state. */
	if (__predict_false(l->l_flag & LW_WCORE)) {
		flags |= PCU_CMD_RELEASE;
	}

	/*
	 * Normally we save for the current LWP, but sometimes we get called
	 * with a different LWP (forking a system LWP or doing a coredump of
	 * a process with multiple threads) and we need to deal with that.
	 */
	KASSERT(l == curlwp || ((!pcu_available_p(l) ||
	    (curlwp->l_proc == l->l_proc && l->l_stat == LSSUSPENDED)) &&
	    pcu_valid == 0));

	if (__predict_true(pcu_valid == 0)) {
		/* PCUs are not in use. */
		return;
	}
	for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
		if ((pcu_valid & (1U << id)) == 0) {
			continue;
		}
		if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
			continue;
		}
		const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
		pcu_lwp_op(pcu, l, flags);
	}
}

/*
 * pcu_do_op: save/release PCU state on the current CPU.
 *
 * => Must be called at IPL_PCU or from the PCU IPI handler.
 */
static inline void
pcu_do_op(const pcu_ops_t *pcu, lwp_t * const l, const int flags)
{
	struct cpu_info * const ci = curcpu();
	const u_int id = pcu->pcu_id;

	KASSERT(l->l_pcu_cpu[id] == ci);

	if (flags & PCU_CMD_SAVE) {
		pcu->pcu_state_save(l);
	}
	if (flags & PCU_CMD_RELEASE) {
		pcu->pcu_state_release(l);
		ci->ci_pcu_curlwp[id] = NULL;
		l->l_pcu_cpu[id] = NULL;
	}
}

/*
 * pcu_cpu_ipi: helper routine to call pcu_do_op() via ipi(9).
 */
static void
pcu_cpu_ipi(void *arg)
{
	const pcu_ipi_msg_t *pcu_msg = arg;
	const pcu_ops_t *pcu = pcu_msg->pcu;
	const u_int id = pcu->pcu_id;
	lwp_t *l = pcu_msg->owner;

	KASSERT(pcu_msg->owner != NULL);

	if (curcpu()->ci_pcu_curlwp[id] != l) {
		/*
		 * Different ownership: another LWP raced with us and
		 * performed the save and release.  There is nothing to do.
		 */
		KASSERT(l->l_pcu_cpu[id] == NULL);
		return;
	}
	pcu_do_op(pcu, l, pcu_msg->flags);
}

/*
 * pcu_lwp_op: perform PCU state save, release or both operations on LWP.
 */
static void
pcu_lwp_op(const pcu_ops_t *pcu, lwp_t *l, const int flags)
{
	const u_int id = pcu->pcu_id;
	struct cpu_info *ci;
	int s;

	/*
	 * Caller should have re-checked if there is any state to manage.
	 * Block the interrupts and inspect again, since a cross-call sent
	 * by a remote CPU could have changed the state.
	 */
	s = splpcu();
	ci = l->l_pcu_cpu[id];
	if (ci == curcpu()) {
		/*
		 * State is on the current CPU - just perform the operations.
		 */
		KASSERTMSG(ci->ci_pcu_curlwp[id] == l,
		    "%s: cpu%u: pcu_curlwp[%u] (%p) != l (%p)",
		    __func__, cpu_index(ci), id, ci->ci_pcu_curlwp[id], l);
		pcu_do_op(pcu, l, flags);
		splx(s);
		return;
	}
	if (__predict_false(ci == NULL)) {
		/* Cross-call has won the race - no state to manage. */
		splx(s);
		return;
	}

	/*
	 * The state is on a remote CPU: perform the operation(s) there.
	 */
	pcu_ipi_msg_t pcu_msg = { .pcu = pcu, .owner = l, .flags = flags };
	ipi_msg_t ipi_msg = { .func = pcu_cpu_ipi, .arg = &pcu_msg };
	ipi_unicast(&ipi_msg, ci);
	splx(s);

	/* Wait for completion. */
	ipi_wait(&ipi_msg);

	KASSERT((flags & PCU_CMD_RELEASE) == 0 || l->l_pcu_cpu[id] == NULL);
}

/*
 * pcu_load: load/initialize the PCU state of current LWP on current CPU.
 */
void
pcu_load(const pcu_ops_t *pcu)
{
	lwp_t *oncpu_lwp, * const l = curlwp;
	const u_int id = pcu->pcu_id;
	struct cpu_info *ci, *curci;
	int s;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	s = splpcu();
	curci = curcpu();
	ci = l->l_pcu_cpu[id];

	/* Does this CPU already have our PCU state loaded? */
	if (ci == curci) {
		/*
		 * The fault recurred while the PCU state is loaded and
		 * therefore the PCU should be re-enabled.  This happens
		 * if the LWP is context switched to another CPU and then
		 * switched back to the original CPU while the state
		 * on that CPU has not been changed by other LWPs.
		 *
		 * It may also happen due to instruction "bouncing" on
		 * some architectures.
		 */
		KASSERT(curci->ci_pcu_curlwp[id] == l);
		KASSERT(pcu_valid_p(pcu, l));
		pcu->pcu_state_load(l, PCU_VALID | PCU_REENABLE);
		splx(s);
		return;
	}

	/* If PCU state of this LWP is on the remote CPU - save it there. */
	if (ci) {
		pcu_ipi_msg_t pcu_msg = { .pcu = pcu, .owner = l,
		    .flags = PCU_CMD_SAVE | PCU_CMD_RELEASE };
		ipi_msg_t ipi_msg = { .func = pcu_cpu_ipi, .arg = &pcu_msg };
		ipi_unicast(&ipi_msg, ci);
		splx(s);

		/*
		 * Wait for completion, re-enter IPL_PCU and re-fetch
		 * the current CPU.
		 */
		ipi_wait(&ipi_msg);
		s = splpcu();
		curci = curcpu();
	}
	KASSERT(l->l_pcu_cpu[id] == NULL);

	/* Save the PCU state on the current CPU, if there is any. */
	if ((oncpu_lwp = curci->ci_pcu_curlwp[id]) != NULL) {
		pcu_do_op(pcu, oncpu_lwp, PCU_CMD_SAVE | PCU_CMD_RELEASE);
		KASSERT(curci->ci_pcu_curlwp[id] == NULL);
	}

	/*
	 * Finally, load the state for this LWP on this CPU.  Indicate to
	 * the load function whether PCU state was valid before this call.
	 */
	const bool valid = ((1U << id) & l->l_pcu_valid) != 0;
	pcu->pcu_state_load(l, valid ? PCU_VALID : 0);
	curci->ci_pcu_curlwp[id] = l;
	l->l_pcu_cpu[id] = curci;
	l->l_pcu_valid |= (1U << id);
	splx(s);
}
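
/*
 * Usage sketch (hypothetical; this lives in MD code, not here): a port
 * typically calls pcu_load() from its "unit disabled" trap handler, e.g.
 * when a thread touches the FPU while access to it is disabled.  Loading
 * (or re-enabling) the state lets the faulting instruction be retried.
 * The fpu_ops and fpu_trap names are illustrative only.
 *
 *	void
 *	fpu_trap(struct trapframe *tf)
 *	{
 *		KASSERT(!cpu_intr_p());
 *		pcu_load(&fpu_ops);
 *	}
 */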

/*
 * pcu_discard: discard the PCU state of the given LWP.  If the "valid"
 * parameter is true, then keep considering the PCU state as valid.
 */
void
pcu_discard(const pcu_ops_t *pcu, lwp_t *l, bool valid)
{
	const u_int id = pcu->pcu_id;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(valid)) {
		l->l_pcu_valid |= (1U << id);
	} else {
		l->l_pcu_valid &= ~(1U << id);
	}
	if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
		return;
	}
	pcu_lwp_op(pcu, l, PCU_CMD_RELEASE);
}

/*
 * pcu_save: save the PCU state to the given LWP and release it from the CPU.
 */
void
pcu_save(const pcu_ops_t *pcu, lwp_t *l)
{
	const u_int id = pcu->pcu_id;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
		return;
	}
	pcu_lwp_op(pcu, l, PCU_CMD_SAVE | PCU_CMD_RELEASE);
}

/*
 * pcu_save_all_on_cpu: save all PCU states on the current CPU.
 */
void
pcu_save_all_on_cpu(void)
{
	int s;

	s = splpcu();
	for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
		const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
		lwp_t *l;

		if ((l = curcpu()->ci_pcu_curlwp[id]) != NULL) {
			pcu_do_op(pcu, l, PCU_CMD_SAVE | PCU_CMD_RELEASE);
		}
	}
	splx(s);
}

/*
 * pcu_valid_p: return true if the PCU state is considered valid.
 * The state becomes "valid" once pcu_load() has been called for it.
 */
bool
pcu_valid_p(const pcu_ops_t *pcu, const lwp_t *l)
{
	const u_int id = pcu->pcu_id;

	return (l->l_pcu_valid & (1U << id)) != 0;
}
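
/*
 * Usage sketch (hypothetical): a caller that needs an up-to-date in-memory
 * copy of the state, e.g. when building an mcontext or a core dump note,
 * would first force the state out of the CPU and then check whether it is
 * valid at all.  The fpu_ops name and the lwp "l" are illustrative only.
 *
 *	pcu_save(&fpu_ops, l);
 *	if (pcu_valid_p(&fpu_ops, l)) {
 *		... the LWP's PCB/MD area now holds the saved state ...
 *	}
 */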

#endif /* PCU_UNIT_COUNT > 0 */