/*	$NetBSD: subr_kcpuset.c,v 1.20 2023/09/23 18:21:11 ad Exp $	*/

/*-
 * Copyright (c) 2011, 2023 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel CPU set implementation.
 *
 * The interface can be used by kernel subsystems as a unified dynamic CPU
 * bitset implementation handling many CPUs.  The facility also supports
 * early use by MD code on boot: sets noted before initialisation are
 * transferred to dynamically allocated storage later in boot.
 *
 * TODO:
 * - Handle "reverse" bitset on fixup/grow.
 */
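
/*
 * Illustrative usage sketch (not taken from this file; the authoritative
 * interface is declared in <sys/kcpuset.h>, and "ci" below is assumed to
 * be a struct cpu_info pointer obtained by the caller):
 *
 *	kcpuset_t *kcp;
 *
 *	kcpuset_create(&kcp, true);		   allocate a zeroed set
 *	kcpuset_set(kcp, cpu_index(ci));	   mark one CPU as a member
 *	if (kcpuset_isset(kcp, cpu_index(ci)))
 *		...				   act on the membership
 *	kcpuset_destroy(kcp);			   release the set
 */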

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.20 2023/09/23 18:21:11 ad Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/intr.h>
#include <sys/sched.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>

/* Number of CPUs to support. */
#define	KC_MAXCPUS		roundup2(MAXCPUS, 32)

/*
 * Structure of dynamic CPU set in the kernel.
 */
struct kcpuset {
	uint32_t		bits[0];
};

typedef struct kcpuset_impl {
	/* Reference count. */
	u_int			kc_refcnt;
	/* Next to free, if non-NULL (used when multiple references). */
	struct kcpuset *	kc_next;
	/* Actual variable-sized field of bits. */
	struct kcpuset		kc_field;
} kcpuset_impl_t;

#define	KC_BITS_OFF		(offsetof(struct kcpuset_impl, kc_field))
#define	KC_GETSTRUCT(b)		((kcpuset_impl_t *)((char *)(b) - KC_BITS_OFF))
#define	KC_GETCSTRUCT(b)	((const kcpuset_impl_t *)((const char *)(b) - KC_BITS_OFF))
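
/*
 * Memory layout sketch (illustrative only): callers hold a pointer to the
 * embedded kc_field, and KC_GETSTRUCT()/KC_GETCSTRUCT() step back by
 * KC_BITS_OFF bytes to recover the enclosing kcpuset_impl_t header:
 *
 *	+-----------+---------+------------------------------+
 *	| kc_refcnt | kc_next | kc_field.bits[0..kc_nfields) |
 *	+-----------+---------+------------------------------+
 *	^                     ^
 *	kcpuset_impl_t *      kcpuset_t * (what callers see)
 */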

/* Sizes of a single bitset. */
#define	KC_SHIFT		5
#define	KC_MASK			31
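
/*
 * Indexing example (illustrative): for CPU index i = 37, the word index
 * is j = i >> KC_SHIFT = 1 and the bit within that word is
 * i & KC_MASK = 5, i.e. the CPU corresponds to __BIT(5) of bits[1].
 */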

/* An array of noted early kcpuset creations and data. */
#define	KC_SAVE_NITEMS		8

/* Structures for early boot mechanism (must be statically initialised). */
static kcpuset_t **		kc_noted_early[KC_SAVE_NITEMS];
static uint32_t			kc_bits_early[KC_SAVE_NITEMS];
static int			kc_last_idx = 0;
static bool			kc_initialised = false;

#define	KC_BITSIZE_EARLY	sizeof(kc_bits_early[0])
#define	KC_NFIELDS_EARLY	1

/*
 * The size (in bytes) of the whole bitset and the number of fields.
 * These must be statically initialised to the early values, since the
 * facility may be used before kcpuset_sysinit() runs.
 */
static size_t			kc_bitsize __read_mostly = KC_BITSIZE_EARLY;
static size_t			kc_nfields __read_mostly = KC_NFIELDS_EARLY;
static size_t			kc_memsize __read_mostly;

static kcpuset_t *		kcpuset_create_raw(bool);

/*
 * kcpuset_sysinit: initialize the subsystem and transfer the early boot
 * cases to dynamically allocated sets.
 */
void
kcpuset_sysinit(void)
{
	kcpuset_t *kc_dynamic[KC_SAVE_NITEMS], *kcp;
	int i, s;

	/* Set the kcpuset_t sizes. */
	kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
	kc_bitsize = sizeof(uint32_t) * kc_nfields;
	kc_memsize = sizeof(kcpuset_impl_t) + kc_bitsize;
	KASSERT(kc_nfields != 0);
	KASSERT(kc_bitsize != 0);

	/* First, pre-allocate kcpuset entries. */
	for (i = 0; i < kc_last_idx; i++) {
		kcp = kcpuset_create_raw(true);
		kc_dynamic[i] = kcp;
	}

	/*
	 * Prepare to convert all early noted kcpuset uses to dynamic sets.
	 * All processors, except the one we are currently running on (the
	 * primary), must not have been spun up yet.  Since MD facilities
	 * can use kcpuset, raise the IPL to high.
	 */
	KASSERT(mp_online == false);

	s = splhigh();
	for (i = 0; i < kc_last_idx; i++) {
		/*
		 * Transfer the bits from early static storage to the kcpuset.
		 */
		KASSERT(kc_bitsize >= KC_BITSIZE_EARLY);
		memcpy(kc_dynamic[i], &kc_bits_early[i], KC_BITSIZE_EARLY);

		/*
		 * Store the new pointer, pointing to the allocated kcpuset.
		 * Note: we are not in an interrupt context and this is the
		 * only CPU running - thus the store is safe (e.g. no need
		 * for the pointer variable to be volatile).
		 */
		*kc_noted_early[i] = kc_dynamic[i];
	}
	kc_initialised = true;
	kc_last_idx = 0;
	splx(s);
}

/*
 * kcpuset_early_ptr: note an early boot use by saving the pointer and
 * returning a pointer to a static, temporary bit field.
 */
static kcpuset_t *
kcpuset_early_ptr(kcpuset_t **kcptr)
{
	kcpuset_t *kcp;
	int s;

	s = splhigh();
	if (kc_last_idx < KC_SAVE_NITEMS) {
		/*
		 * Save the pointer and return a pointer to the static
		 * early bit field, which needs to be zeroed out.
		 */
		kc_noted_early[kc_last_idx] = kcptr;
		kcp = (kcpuset_t *)&kc_bits_early[kc_last_idx];
		kc_last_idx++;
		memset(kcp, 0, KC_BITSIZE_EARLY);
		KASSERT(kc_bitsize == KC_BITSIZE_EARLY);
	} else {
		panic("kcpuset(9): all early-use entries exhausted; "
		    "increase KC_SAVE_NITEMS\n");
	}
	splx(s);

	return kcp;
}

/*
 * Routines to create or destroy the CPU set.
 * Early boot case is handled.
 */

static kcpuset_t *
kcpuset_create_raw(bool zero)
{
	kcpuset_impl_t *kc;

	kc = kmem_alloc(kc_memsize, KM_SLEEP);
	kc->kc_refcnt = 1;
	kc->kc_next = NULL;

	if (zero) {
		memset(&kc->kc_field, 0, kc_bitsize);
	}

	/* Note: return pointer to the actual field of bits. */
	KASSERT((uint8_t *)kc + KC_BITS_OFF == (uint8_t *)&kc->kc_field);
	return &kc->kc_field;
}

void
kcpuset_create(kcpuset_t **retkcp, bool zero)
{
	if (__predict_false(!kc_initialised)) {
		/* Early boot use - special case. */
		*retkcp = kcpuset_early_ptr(retkcp);
		return;
	}
	*retkcp = kcpuset_create_raw(zero);
}

void
kcpuset_clone(kcpuset_t **retkcp, const kcpuset_t *kcp)
{
	kcpuset_create(retkcp, false);
	memcpy(*retkcp, kcp, kc_bitsize);
}

void
kcpuset_destroy(kcpuset_t *kcp)
{
	const size_t size = kc_memsize;
	kcpuset_impl_t *kc;

	KASSERT(kc_initialised);
	KASSERT(kcp != NULL);

	do {
		kc = KC_GETSTRUCT(kcp);
		kcp = kc->kc_next;
		kmem_free(kc, size);
	} while (kcp);
}

/*
 * Routines to reference/unreference the CPU set.
 * Note: early boot case is not supported by these routines.
 */
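
/*
 * Illustrative reference-counting sketch (hypothetical caller, not taken
 * from this file): a shared set is retained with kcpuset_use() and
 * released with kcpuset_unuse().  Passing a list pointer to
 * kcpuset_unuse() defers the actual free, e.g. until after a lock that
 * must not be held across kcpuset_destroy() has been dropped:
 *
 *	kcpuset_t *tofree = NULL;
 *
 *	kcpuset_use(shared);			   take a reference
 *	...
 *	kcpuset_unuse(shared, &tofree);		   drop it, defer the free
 *	...					   locks released here
 *	if (tofree != NULL)
 *		kcpuset_destroy(tofree);
 */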

void
kcpuset_use(kcpuset_t *kcp)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	atomic_inc_uint(&kc->kc_refcnt);
}

void
kcpuset_unuse(kcpuset_t *kcp, kcpuset_t **lst)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);

	/* Pair release/acquire so the last reference sees all prior writes. */
	membar_release();
	if (atomic_dec_uint_nv(&kc->kc_refcnt) != 0) {
		return;
	}
	membar_acquire();
	KASSERT(kc->kc_next == NULL);
	if (lst == NULL) {
		kcpuset_destroy(kcp);
		return;
	}
	kc->kc_next = *lst;
	*lst = kcp;
}

/*
 * Routines to transfer the CPU set from / to userspace.
 * Note: early boot case is not supported by these routines.
 */

int
kcpuset_copyin(const cpuset_t *ucp, kcpuset_t *kcp, size_t len)
{
	kcpuset_impl_t *kc __diagused = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);

	if (len > kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyin(ucp, kcp, len);
}

int
kcpuset_copyout(kcpuset_t *kcp, cpuset_t *ucp, size_t len)
{
	kcpuset_impl_t *kc __diagused = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);

	if (len > kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyout(kcp, ucp, len);
}

void
kcpuset_export_u32(const kcpuset_t *kcp, uint32_t *bitfield, size_t len)
{
	size_t rlen = MIN(kc_bitsize, len);

	KASSERT(kcp != NULL);
	memcpy(bitfield, kcp->bits, rlen);
}

/*
 * Routines to change bit field - zero, fill, copy, set, unset, etc.
 */

void
kcpuset_zero(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, 0, kc_bitsize);
}

void
kcpuset_fill(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, ~0, kc_bitsize);
}

void
kcpuset_copy(kcpuset_t *dkcp, const kcpuset_t *skcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_next == NULL);
	memcpy(dkcp, skcp, kc_bitsize);
}

void
kcpuset_set(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] |= __BIT(i & KC_MASK);
}

void
kcpuset_clear(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] &= ~(__BIT(i & KC_MASK));
}

bool
kcpuset_isset(const kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(kcp != NULL);
	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	return ((__BIT(i & KC_MASK)) & kcp->bits[j]) != 0;
}

bool
kcpuset_isotherset(const kcpuset_t *kcp, cpuid_t i)
{
	const size_t j2 = i >> KC_SHIFT;
	const uint32_t mask = ~(__BIT(i & KC_MASK));

	for (size_t j = 0; j < kc_nfields; j++) {
		const uint32_t bits = kcp->bits[j];
		if (bits && (j != j2 || (bits & mask) != 0)) {
			return true;
		}
	}
	return false;
}

bool
kcpuset_iszero(const kcpuset_t *kcp)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp->bits[j] != 0) {
			return false;
		}
	}
	return true;
}

bool
kcpuset_match(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	return memcmp(kcp1, kcp2, kc_bitsize) == 0;
}

bool
kcpuset_intersecting_p(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp1->bits[j] & kcp2->bits[j])
			return true;
	}
	return false;
}

cpuid_t
kcpuset_ffs(const kcpuset_t *kcp)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp->bits[j])
			return 32 * j + ffs(kcp->bits[j]);
	}
	return 0;
}

cpuid_t
kcpuset_ffs_intersecting(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		uint32_t bits = kcp1->bits[j] & kcp2->bits[j];
		if (bits)
			return 32 * j + ffs(bits);
	}
	return 0;
}

void
kcpuset_merge(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		kcp1->bits[j] |= kcp2->bits[j];
	}
}

void
kcpuset_intersect(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		kcp1->bits[j] &= kcp2->bits[j];
	}
}

void
kcpuset_remove(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		kcp1->bits[j] &= ~kcp2->bits[j];
	}
}

int
kcpuset_countset(const kcpuset_t *kcp)
{
	int count = 0;

	for (size_t j = 0; j < kc_nfields; j++) {
		count += popcount32(kcp->bits[j]);
	}
	return count;
}

/*
 * Routines to set/clear the bits atomically.
 */

void
kcpuset_atomic_set(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(j < kc_nfields);
	atomic_or_32(&kcp->bits[j], __BIT(i & KC_MASK));
}

void
kcpuset_atomic_clear(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(j < kc_nfields);
	atomic_and_32(&kcp->bits[j], ~(__BIT(i & KC_MASK)));
}

void
kcpuset_atomicly_intersect(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp2->bits[j])
			atomic_and_32(&kcp1->bits[j], kcp2->bits[j]);
	}
}

void
kcpuset_atomicly_merge(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp2->bits[j])
			atomic_or_32(&kcp1->bits[j], kcp2->bits[j]);
	}
}

void
kcpuset_atomicly_remove(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp2->bits[j])
			atomic_and_32(&kcp1->bits[j], ~kcp2->bits[j]);
	}
}