/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/xc_levels.h>
#include <sys/cpu.h>
#include <sys/psw.h>
#include <sys/sunddi.h>
#include <sys/debug.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/mutex_impl.h>
#include <sys/stack.h>
#include <sys/promif.h>
#include <sys/x86_archext.h>

/*
 * Implementation for cross-processor calls via interprocessor interrupts
 *
 * This implementation uses a message passing architecture to allow multiple
 * concurrent cross calls to be in flight at any given time. We use the cmpxchg
 * instruction, aka casptr(), to implement simple efficient work queues for
 * message passing between CPUs with almost no need for regular locking.
 * See xc_extract() and xc_insert() below.
 *
 * The general idea is that initiating a cross call means putting a message
 * on each target CPU's work queue. Any synchronization is handled by passing
 * the message back and forth between initiator and target(s).
 *
 * Every CPU has xc_work_cnt, which indicates it has messages to process.
 * This value is incremented as message traffic is initiated and decremented
 * with every message that finishes all processing.
 *
 * The code needs no mfence or other membar_*() calls. The uses of
 * casptr(), cas32() and atomic_dec_32() for the message passing are
 * implemented with LOCK prefix instructions which are equivalent to mfence.
 *
 * One interesting aspect of this implementation is that it allows 2 or more
 * CPUs to initiate cross calls to intersecting sets of CPUs at the same time.
 * The cross call processing by the CPUs will happen in any order with only
 * a guarantee, for xc_call() and xc_sync(), that an initiator won't return
 * from cross calls before all slaves have invoked the function.
 *
 * The reason for this asynchronous approach is to allow for fast global
 * TLB shootdowns. If all N CPUs try to do a global TLB invalidation on a
 * different virtual address at the same time, the old code required
 * N squared IPIs. With this method, depending on timing, it could happen
 * with just N IPIs.
 */
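
/*
 * Illustrative sketch (hypothetical caller, not part of this file's
 * interfaces): a global TLB shootdown might be issued with something like
 *
 *	cpuset_t set;
 *
 *	CPUSET_ALL(set);
 *	xc_call((xc_arg_t)va, 0, 0, CPUSET2BV(set), hypothetical_tlbflush);
 *
 * where hypothetical_tlbflush is an xc_func_t supplied by the caller.
 * xc_call_nowait() and xc_sync() take the same arguments but provide
 * weaker and stronger completion guarantees, respectively.
 */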

/*
 * The default is to not enable collecting counts of IPI information, since
 * the updating of shared cachelines could cause excess bus traffic.
 */
uint_t xc_collect_enable = 0;
uint64_t xc_total_cnt = 0;	/* total #IPIs sent for cross calls */
uint64_t xc_multi_cnt = 0;	/* # times we piggybacked on another IPI */

/*
 * Values for message states. Here are the normal transitions. A transition
 * of "->" happens in the slave cpu and "=>" happens in the master cpu as
 * the messages are passed back and forth.
 *
 * FREE => ASYNC ->                       DONE => FREE
 * FREE => CALL ->                        DONE => FREE
 * FREE => SYNC -> WAITING => RELEASED -> DONE => FREE
 *
 * The interesting one above is ASYNC. You might ask, why not go directly
 * to FREE, instead of DONE? If it did that, it might be possible to exhaust
 * the master's xc_free list if a master can generate ASYNC messages faster
 * than the slave can process them. That could be dealt with by more
 * complicated handling. However, since nothing important uses ASYNC,
 * I've not bothered.
 */
#define	XC_MSG_FREE	(0)	/* msg in xc_free queue */
#define	XC_MSG_ASYNC	(1)	/* msg in slave xc_msgbox */
#define	XC_MSG_CALL	(2)	/* msg in slave xc_msgbox */
#define	XC_MSG_SYNC	(3)	/* msg in slave xc_msgbox */
#define	XC_MSG_WAITING	(4)	/* msg in master xc_msgbox or xc_waiters */
#define	XC_MSG_RELEASED	(5)	/* msg in slave xc_msgbox */
#define	XC_MSG_DONE	(6)	/* msg in master xc_msgbox */

/*
 * We allow for one high priority message at a time to happen in the system.
 * This is used for panic, kmdb, etc., so no locking is done.
 */
static volatile cpuset_t xc_priority_set_store;
static volatile ulong_t *xc_priority_set = CPUSET2BV(xc_priority_set_store);
static xc_data_t xc_priority_data;

/*
 * Wrappers to avoid C compiler warnings due to volatile. The atomic bit
 * operations don't accept volatile bit vectors - which is a bit silly.
 */
#define	XC_BT_SET(vector, b)	BT_ATOMIC_SET((ulong_t *)(vector), (b))
#define	XC_BT_CLEAR(vector, b)	BT_ATOMIC_CLEAR((ulong_t *)(vector), (b))

/*
 * Decrement a CPU's work count
 */
static void
xc_decrement(struct machcpu *mcpu)
{
	atomic_dec_32(&mcpu->xc_work_cnt);
}

/*
 * Increment a CPU's work count and return the old value
 */
static int
xc_increment(struct machcpu *mcpu)
{
	int old;

	do {
		old = mcpu->xc_work_cnt;
	} while (cas32((uint32_t *)&mcpu->xc_work_cnt, old, old + 1) != old);
	return (old);
}

/*
 * Put a message into a queue. The insertion is atomic no matter
 * how many different inserts/extracts to the same queue happen.
 */
static void
xc_insert(void *queue, xc_msg_t *msg)
{
	xc_msg_t *old_head;

	/*
	 * FREE messages should only ever be getting inserted into
	 * the xc_master CPU's xc_free queue.
	 */
	ASSERT(msg->xc_command != XC_MSG_FREE ||
	    cpu[msg->xc_master] == NULL || /* possible only during init */
	    queue == &cpu[msg->xc_master]->cpu_m.xc_free);

	do {
		old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue;
		msg->xc_next = old_head;
	} while (casptr(queue, old_head, msg) != old_head);
}

/*
 * Extract a message from a queue. The extraction is atomic only
 * when just one thread does extractions from the queue.
 * If the queue is empty, NULL is returned.
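 * (With more than one concurrent extractor, the casptr() loop below would
 * be exposed to an A-B-A race; each queue here has a single extracting
 * thread at any given time.)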
 */
static xc_msg_t *
xc_extract(xc_msg_t **queue)
{
	xc_msg_t *old_head;

	do {
		old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue;
		if (old_head == NULL)
			return (old_head);
	} while (casptr(queue, old_head, old_head->xc_next) != old_head);
	old_head->xc_next = NULL;
	return (old_head);
}

/*
 * Initialize the machcpu fields used for cross calls
 */
static uint_t xc_initialized = 0;

void
xc_init_cpu(struct cpu *cpup)
{
	xc_msg_t *msg;
	int c;

	/*
	 * Allocate message buffers for the new CPU.
	 */
	for (c = 0; c < max_ncpus; ++c) {
		if (plat_dr_support_cpu()) {
			/*
			 * Allocate a message buffer for every CPU possible
			 * in the system, including our own, and add them to
			 * our xc message queue.
			 */
			msg = kmem_zalloc(sizeof (*msg), KM_SLEEP);
			msg->xc_command = XC_MSG_FREE;
			msg->xc_master = cpup->cpu_id;
			xc_insert(&cpup->cpu_m.xc_free, msg);
		} else if (cpu[c] != NULL && cpu[c] != cpup) {
			/*
			 * Add a new message buffer to each existing CPU's free
			 * list, as well as one for my list for each of them.
			 * Note: cpu0 is statically inserted into the cpu[]
			 * array, so we need to check that cpu[c] isn't cpup
			 * itself to avoid allocating extra message buffers
			 * for cpu0.
			 */
			msg = kmem_zalloc(sizeof (*msg), KM_SLEEP);
			msg->xc_command = XC_MSG_FREE;
			msg->xc_master = c;
			xc_insert(&cpu[c]->cpu_m.xc_free, msg);

			msg = kmem_zalloc(sizeof (*msg), KM_SLEEP);
			msg->xc_command = XC_MSG_FREE;
			msg->xc_master = cpup->cpu_id;
			xc_insert(&cpup->cpu_m.xc_free, msg);
		}
	}

	if (!plat_dr_support_cpu()) {
		/*
		 * Add one for self messages if CPU hotplug is disabled.
		 */
		msg = kmem_zalloc(sizeof (*msg), KM_SLEEP);
		msg->xc_command = XC_MSG_FREE;
		msg->xc_master = cpup->cpu_id;
		xc_insert(&cpup->cpu_m.xc_free, msg);
	}
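
	/*
	 * At this point every message on a CPU's xc_free list has xc_master
	 * set to that CPU's id; xc_common() relies on this when it pulls a
	 * free message to post to a slave.
	 */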

	if (!xc_initialized)
		xc_initialized = 1;
}

void
xc_fini_cpu(struct cpu *cpup)
{
	xc_msg_t *msg;

	ASSERT((cpup->cpu_flags & CPU_READY) == 0);
	ASSERT(cpup->cpu_m.xc_msgbox == NULL);
	ASSERT(cpup->cpu_m.xc_work_cnt == 0);

	while ((msg = xc_extract(&cpup->cpu_m.xc_free)) != NULL) {
		kmem_free(msg, sizeof (*msg));
	}
}

#define	XC_FLUSH_MAX_WAITS	1000

/* Flush inflight message buffers. */
int
xc_flush_cpu(struct cpu *cpup)
{
	int i;

	ASSERT((cpup->cpu_flags & CPU_READY) == 0);

	/*
	 * Pause all working CPUs, which ensures that there's no CPU in
	 * function xc_common().
	 * This is used to work around a race condition window in xc_common()
	 * between checking the CPU_READY flag and increasing the working
	 * item count.
	 */
	pause_cpus(cpup);
	start_cpus();

	for (i = 0; i < XC_FLUSH_MAX_WAITS; i++) {
		if (cpup->cpu_m.xc_work_cnt == 0) {
			break;
		}
		DELAY(1);
	}
	for (; i < XC_FLUSH_MAX_WAITS; i++) {
		if (!BT_TEST(xc_priority_set, cpup->cpu_id)) {
			break;
		}
		DELAY(1);
	}

	return (i >= XC_FLUSH_MAX_WAITS ? ETIME : 0);
}

/*
 * X-call message processing routine. Note that this is used by both
 * senders and recipients of messages.
 *
 * We're protected against changing CPUs by either being in a high-priority
 * interrupt, having preemption disabled or by having a raised SPL.
 */
/*ARGSUSED*/
uint_t
xc_serv(caddr_t arg1, caddr_t arg2)
{
	struct machcpu *mcpup = &(CPU->cpu_m);
	xc_msg_t *msg;
	xc_data_t *data;
	xc_msg_t *xc_waiters = NULL;
	uint32_t num_waiting = 0;
	xc_func_t func;
	xc_arg_t a1;
	xc_arg_t a2;
	xc_arg_t a3;
	uint_t rc = DDI_INTR_UNCLAIMED;

	while (mcpup->xc_work_cnt != 0) {
		rc = DDI_INTR_CLAIMED;

		/*
		 * We may have to wait for a message to arrive.
		 */
		for (msg = NULL; msg == NULL;
		    msg = xc_extract(&mcpup->xc_msgbox)) {

			/*
			 * Always check for and handle a priority message.
			 */
			if (BT_TEST(xc_priority_set, CPU->cpu_id)) {
				func = xc_priority_data.xc_func;
				a1 = xc_priority_data.xc_a1;
				a2 = xc_priority_data.xc_a2;
				a3 = xc_priority_data.xc_a3;
				XC_BT_CLEAR(xc_priority_set, CPU->cpu_id);
				xc_decrement(mcpup);
				func(a1, a2, a3);
				if (mcpup->xc_work_cnt == 0)
					return (rc);
			}

			/*
			 * wait for a message to arrive
			 */
			SMT_PAUSE();
		}

		/*
		 * process the message
		 */
		switch (msg->xc_command) {

		/*
		 * ASYNC gives back the message immediately, then we do the
		 * function and return with no more waiting.
		 */
		case XC_MSG_ASYNC:
			data = &cpu[msg->xc_master]->cpu_m.xc_data;
			func = data->xc_func;
			a1 = data->xc_a1;
			a2 = data->xc_a2;
			a3 = data->xc_a3;
			msg->xc_command = XC_MSG_DONE;
			xc_insert(&cpu[msg->xc_master]->cpu_m.xc_msgbox, msg);
			if (func != NULL)
				(void) (*func)(a1, a2, a3);
			xc_decrement(mcpup);
			break;

		/*
		 * SYNC messages do the call, then send it back to the master
		 * in WAITING mode
		 */
		case XC_MSG_SYNC:
			data = &cpu[msg->xc_master]->cpu_m.xc_data;
			if (data->xc_func != NULL)
				(void) (*data->xc_func)(data->xc_a1,
				    data->xc_a2, data->xc_a3);
			msg->xc_command = XC_MSG_WAITING;
			xc_insert(&cpu[msg->xc_master]->cpu_m.xc_msgbox, msg);
			break;

		/*
		 * WAITING messages are collected by the master until all
		 * have arrived. Once all arrive, we release them back to
		 * the slaves
		 */
		case XC_MSG_WAITING:
			xc_insert(&xc_waiters, msg);
			if (++num_waiting < mcpup->xc_wait_cnt)
				break;
			while ((msg = xc_extract(&xc_waiters)) != NULL) {
				msg->xc_command = XC_MSG_RELEASED;
				xc_insert(&cpu[msg->xc_slave]->cpu_m.xc_msgbox,
				    msg);
				--num_waiting;
			}
			if (num_waiting != 0)
				panic("wrong number waiting");
			mcpup->xc_wait_cnt = 0;
			break;

		/*
		 * CALL messages do the function and then, like RELEASE,
		 * send the message back to the master as DONE.
		 */
		case XC_MSG_CALL:
			data = &cpu[msg->xc_master]->cpu_m.xc_data;
			if (data->xc_func != NULL)
				(void) (*data->xc_func)(data->xc_a1,
				    data->xc_a2, data->xc_a3);
			/*FALLTHROUGH*/
		case XC_MSG_RELEASED:
			msg->xc_command = XC_MSG_DONE;
			xc_insert(&cpu[msg->xc_master]->cpu_m.xc_msgbox, msg);
			xc_decrement(mcpup);
			break;

		/*
		 * DONE means a slave has completely finished up.
		 * Once we collect all the DONE messages, we'll exit
		 * processing too.
		 */
		case XC_MSG_DONE:
			msg->xc_command = XC_MSG_FREE;
			xc_insert(&mcpup->xc_free, msg);
			xc_decrement(mcpup);
			break;

		case XC_MSG_FREE:
			panic("free message 0x%p in msgbox", (void *)msg);
			break;

		default:
			panic("bad message 0x%p in msgbox", (void *)msg);
			break;
		}
	}
	return (rc);
}

/*
 * Initiate cross call processing.
 */
static void
xc_common(
	xc_func_t func,
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set,
	uint_t command)
{
	int c;
	struct cpu *cpup;
	xc_msg_t *msg;
	xc_data_t *data;
	int cnt;
	int save_spl;

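	/*
	 * Until xc_init_cpu() has run for the first time, cross calls can't
	 * be posted, so just invoke the function locally when this CPU is in
	 * the target set.
	 */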
	if (!xc_initialized) {
		if (BT_TEST(set, CPU->cpu_id) && (CPU->cpu_flags & CPU_READY) &&
		    func != NULL)
			(void) (*func)(arg1, arg2, arg3);
		return;
	}

	save_spl = splr(ipltospl(XC_HI_PIL));

	/*
	 * fill in cross call data
	 */
	data = &CPU->cpu_m.xc_data;
	data->xc_func = func;
	data->xc_a1 = arg1;
	data->xc_a2 = arg2;
	data->xc_a3 = arg3;

	/*
	 * Post messages to all CPUs involved that are CPU_READY
	 */
	CPU->cpu_m.xc_wait_cnt = 0;
	for (c = 0; c < max_ncpus; ++c) {
		if (!BT_TEST(set, c))
			continue;
		cpup = cpu[c];
		if (cpup == NULL || !(cpup->cpu_flags & CPU_READY))
			continue;

		/*
		 * Fill out a new message.
		 */
		msg = xc_extract(&CPU->cpu_m.xc_free);
		if (msg == NULL)
			panic("Ran out of free xc_msg_t's");
		msg->xc_command = command;
		if (msg->xc_master != CPU->cpu_id)
			panic("msg %p has wrong xc_master", (void *)msg);
		msg->xc_slave = c;

		/*
		 * Increment my work count for all messages that I'll
		 * transition from DONE to FREE.
		 * Also remember how many XC_MSG_WAITINGs to look for
		 */
		(void) xc_increment(&CPU->cpu_m);
		if (command == XC_MSG_SYNC)
			++CPU->cpu_m.xc_wait_cnt;

		/*
		 * Increment the target CPU work count then insert the message
		 * in the target msgbox. If I post the first bit of work
		 * for the target to do, send an IPI to the target CPU.
		 */
		cnt = xc_increment(&cpup->cpu_m);
		xc_insert(&cpup->cpu_m.xc_msgbox, msg);
		if (cpup != CPU) {
			if (cnt == 0) {
				CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
				send_dirint(c, XC_HI_PIL);
				if (xc_collect_enable)
					++xc_total_cnt;
			} else if (xc_collect_enable) {
				++xc_multi_cnt;
			}
		}
	}

	/*
	 * Now drop into the message handler until all work is done
	 */
	(void) xc_serv(NULL, NULL);
	splx(save_spl);
}

/*
 * Push out a priority cross call.
 */
static void
xc_priority_common(
	xc_func_t func,
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set)
{
	int i;
	int c;
	struct cpu *cpup;

	/*
	 * Wait briefly for any previous xc_priority to have finished.
	 */
	for (c = 0; c < max_ncpus; ++c) {
		cpup = cpu[c];
		if (cpup == NULL || !(cpup->cpu_flags & CPU_READY))
			continue;

		/*
		 * The value of 40000 here is from old kernel code. It
		 * really should be changed to some time-based value, since
		 * under a hypervisor, there's no guarantee a remote CPU
		 * is even scheduled.
		 */
		for (i = 0; BT_TEST(xc_priority_set, c) && i < 40000; ++i)
			SMT_PAUSE();

		/*
		 * If some CPU did not respond to a previous priority request,
		 * it's probably deadlocked with interrupts blocked or has
		 * some similar problem. We'll just erase the previous request
		 * - which was most likely a kmdb_enter that has already
		 * expired - and plow ahead.
		 */
		if (BT_TEST(xc_priority_set, c)) {
			XC_BT_CLEAR(xc_priority_set, c);
			if (cpup->cpu_m.xc_work_cnt > 0)
				xc_decrement(&cpup->cpu_m);
		}
	}

	/*
	 * fill in cross call data
	 */
	xc_priority_data.xc_func = func;
	xc_priority_data.xc_a1 = arg1;
	xc_priority_data.xc_a2 = arg2;
	xc_priority_data.xc_a3 = arg3;

	/*
	 * Post messages to all CPUs involved that are CPU_READY
	 * We'll always IPI, plus bang on the xc_msgbox for i86_mwait()
	 */
	for (c = 0; c < max_ncpus; ++c) {
		if (!BT_TEST(set, c))
			continue;
		cpup = cpu[c];
		if (cpup == NULL || !(cpup->cpu_flags & CPU_READY) ||
		    cpup == CPU)
			continue;
		(void) xc_increment(&cpup->cpu_m);
		XC_BT_SET(xc_priority_set, c);
		send_dirint(c, XC_HI_PIL);
		for (i = 0; i < 10; ++i) {
			(void) casptr(&cpup->cpu_m.xc_msgbox,
			    cpup->cpu_m.xc_msgbox, cpup->cpu_m.xc_msgbox);
		}
	}
}

/*
 * Do cross call to all other CPUs with absolutely no waiting or handshaking.
 * This should only be used for extraordinary operations, like panic(), which
 * need to work, in some fashion, in a not completely functional system.
 * All other uses that want minimal waiting should use xc_call_nowait().
 */
void
xc_priority(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set,
	xc_func_t func)
{
	extern int IGNORE_KERNEL_PREEMPTION;
	int save_spl = splr(ipltospl(XC_HI_PIL));
	int save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;

	IGNORE_KERNEL_PREEMPTION = 1;
	xc_priority_common((xc_func_t)func, arg1, arg2, arg3, set);
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
	splx(save_spl);
}

/*
 * Wrapper for kmdb to capture other CPUs, causing them to enter the debugger.
 */
void
kdi_xc_others(int this_cpu, void (*func)(void))
{
	extern int IGNORE_KERNEL_PREEMPTION;
	int save_kernel_preemption;
	cpuset_t set;

	if (!xc_initialized)
		return;

	save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;
	IGNORE_KERNEL_PREEMPTION = 1;
	CPUSET_ALL_BUT(set, this_cpu);
	xc_priority_common((xc_func_t)func, 0, 0, 0, CPUSET2BV(set));
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
}


/*
 * Invoke function on specified processors. Remotes may continue after
 * service with no waiting. xc_call_nowait() may return immediately too.
 */
void
xc_call_nowait(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set,
	xc_func_t func)
{
	xc_common(func, arg1, arg2, arg3, set, XC_MSG_ASYNC);
}

/*
 * Invoke function on specified processors. Remotes may continue after
 * service with no waiting. xc_call() returns only after remotes have finished.
 */
void
xc_call(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set,
	xc_func_t func)
{
	xc_common(func, arg1, arg2, arg3, set, XC_MSG_CALL);
}

/*
 * Invoke function on specified processors. Remotes wait until all have
 * finished. xc_sync() also waits until all remotes have finished.
 */
void
xc_sync(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	ulong_t *set,
	xc_func_t func)
{
	xc_common(func, arg1, arg2, arg3, set, XC_MSG_SYNC);
}