xref: /dflybsd-src/sys/platform/pc64/apic/lapic.c (revision 0eb2eccd5a86ef7dd7492d2651de55c3589f23d7)
1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/bus.h>
32 #include <sys/machintr.h>
33 #include <machine/globaldata.h>
34 #include <machine/smp.h>
35 #include <machine/md_var.h>
36 #include <machine/pmap.h>
37 #include <machine_base/apic/lapic.h>
38 #include <machine_base/apic/ioapic_abi.h>
39 #include <machine/segments.h>
40 #include <sys/thread2.h>
41 
42 #include <machine/intr_machdep.h>
43 
44 #include "apicvar.h"
45 
46 volatile lapic_t *lapic;
47 
48 static void	lapic_timer_calibrate(void);
49 static void	lapic_timer_set_divisor(int);
50 static void	lapic_timer_fixup_handler(void *);
51 static void	lapic_timer_restart_handler(void *);
52 
53 void		lapic_timer_process(void);
54 void		lapic_timer_process_frame(struct intrframe *);
55 void		lapic_timer_always(struct intrframe *);
56 
57 static int	lapic_timer_enable = 1;
58 TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);
59 
60 static void	lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
61 static void	lapic_timer_intr_enable(struct cputimer_intr *);
62 static void	lapic_timer_intr_restart(struct cputimer_intr *);
63 static void	lapic_timer_intr_pmfixup(struct cputimer_intr *);
64 
65 static struct cputimer_intr lapic_cputimer_intr = {
66 	.freq = 0,
67 	.reload = lapic_timer_intr_reload,
68 	.enable = lapic_timer_intr_enable,
69 	.config = cputimer_intr_default_config,
70 	.restart = lapic_timer_intr_restart,
71 	.pmfixup = lapic_timer_intr_pmfixup,
72 	.initclock = cputimer_intr_default_initclock,
73 	.next = SLIST_ENTRY_INITIALIZER,
74 	.name = "lapic",
75 	.type = CPUTIMER_INTR_LAPIC,
76 	.prio = CPUTIMER_INTR_PRIO_LAPIC,
77 	.caps = CPUTIMER_INTR_CAP_NONE
78 };
79 
80 static int		lapic_timer_divisor_idx = -1;
81 static const uint32_t	lapic_timer_divisors[] = {
82 	APIC_TDCR_2,	APIC_TDCR_4,	APIC_TDCR_8,	APIC_TDCR_16,
83 	APIC_TDCR_32,	APIC_TDCR_64,	APIC_TDCR_128,	APIC_TDCR_1
84 };
85 #define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))
86 
/*
 * Signal completion of the current interrupt to the local APIC by
 * writing its EOI register (the value written is ignored; the write
 * itself acknowledges the highest-priority in-service interrupt).
 */
void
lapic_eoi(void)
{

	lapic->eoi = 0;
}
93 
/*
 * Enable the LAPIC and configure its local interrupt sources.
 *
 * Called on the BSP (bsp == TRUE) and once on each AP.  The BSP also
 * installs the IDT vectors shared by all cpus and calibrates and
 * registers the LAPIC timer; the APs reuse the divisor the BSP
 * determined during calibration.
 */
void
lapic_init(boolean_t bsp)
{
	uint32_t timer;
	u_int   temp;

	/*
	 * Install vectors
	 *
	 * Since IDT is shared between BSP and APs, these vectors
	 * only need to be installed once; we do it on BSP.
	 */
	if (bsp) {
		/* Install a 'Spurious INTerrupt' vector */
		setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for TLB invalidation */
		setidt(XINVLTLB_OFFSET, Xinvltlb,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for IPIQ messaging */
		setidt(XIPIQ_OFFSET, Xipiq,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install a timer vector */
		setidt(XTIMER_OFFSET, Xtimer,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for CPU stop/restart */
		setidt(XCPUSTOP_OFFSET, Xcpustop,
		    SDT_SYSIGT, SEL_KPL, 0);
	}

	/*
	 * Setup LINT0 as ExtINT on the BSP.  This is theoretically an
	 * aggregate interrupt input from the 8259.  The INTA cycle
	 * will be routed to the external controller (the 8259) which
	 * is expected to supply the vector.
	 *
	 * Must be setup edge triggered, active high.
	 *
	 * Disable LINT0 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT0 on the APs.  It doesn't matter what delivery
	 * mode we use because we leave it masked.
	 */
	temp = lapic->lvt_lint0;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	if (bsp) {
		temp |= APIC_LVT_DM_EXTINT;
		if (apic_io_enable)
			temp |= APIC_LVT_MASKED;
	} else {
		temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
	}
	lapic->lvt_lint0 = temp;

	/*
	 * Setup LINT1 as NMI.
	 *
	 * Must be setup edge trigger, active high.
	 *
	 * Enable LINT1 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT1 on the APs.
	 */
	temp = lapic->lvt_lint1;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
	if (bsp && apic_io_enable)
		temp &= ~APIC_LVT_MASKED;
	lapic->lvt_lint1 = temp;

	/*
	 * Mask the LAPIC error interrupt, LAPIC performance counter
	 * interrupt.
	 */
	lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
	lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;

	/*
	 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
	 * The timer is unmasked later by lapic_timer_intr_enable().
	 */
	timer = lapic->lvt_timer;
	timer &= ~APIC_LVTT_VECTOR;
	timer |= XTIMER_OFFSET;
	timer |= APIC_LVTT_MASKED;
	lapic->lvt_timer = timer;

	/*
	 * Set the Task Priority Register as needed.   At the moment allow
	 * interrupts on all cpus (the APs will remain CLId until they are
	 * ready to deal).  We could disable all but IPIs by setting
	 * temp |= TPR_IPI for cpu != 0.
	 */
	temp = lapic->tpr;
	temp &= ~APIC_TPR_PRIO;		/* clear priority field */
#ifdef SMP /* APIC-IO */
if (!apic_io_enable) {
#endif
	/*
	 * If we are NOT running the IO APICs, the LAPIC will only be used
	 * for IPIs.  Set the TPR to prevent any unintentional interrupts.
	 */
	temp |= TPR_IPI;
#ifdef SMP /* APIC-IO */
}
#endif
	lapic->tpr = temp;

	/*
	 * Enable the LAPIC
	 */
	temp = lapic->svr;
	temp |= APIC_SVR_ENABLE;	/* enable the LAPIC */
	temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */

	/*
	 * Set the spurious interrupt vector.  The low 4 bits of the vector
	 * must be 1111.
	 */
	if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
		panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
	temp &= ~APIC_SVR_VECTOR;
	temp |= XSPURIOUSINT_OFFSET;

	lapic->svr = temp;

	/*
	 * Pump out a few EOIs to clean out interrupts that got through
	 * before we were able to set the TPR.
	 */
	lapic_eoi();
	lapic_eoi();
	lapic_eoi();

	/*
	 * BSP: calibrate the timer and, if enabled by the
	 * hw.lapic_timer_enable tunable, register and select it as the
	 * interrupt cputimer.  APs: reuse the divisor the BSP found.
	 */
	if (bsp) {
		lapic_timer_calibrate();
		if (lapic_timer_enable) {
			cputimer_intr_register(&lapic_cputimer_intr);
			cputimer_intr_select(&lapic_cputimer_intr, 0);
		}
	} else {
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
	}

	if (bootverbose)
		apic_dump("apic_initialize()");
}
249 
/*
 * Program the LAPIC timer divide configuration register from the
 * lapic_timer_divisors[] table.  divisor_idx must be a valid index
 * into that table (asserted).
 */
static void
lapic_timer_set_divisor(int divisor_idx)
{
	KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
	lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
}
256 
/*
 * Arm the LAPIC timer in one-shot mode with the given initial count.
 * The periodic bit is cleared in the LVT timer entry first, then the
 * initial-count register write starts the countdown.
 */
static void
lapic_timer_oneshot(u_int count)
{
	uint32_t value;

	value = lapic->lvt_timer;
	value &= ~APIC_LVTT_PERIODIC;
	lapic->lvt_timer = value;
	lapic->icr_timer = count;
}
267 
/*
 * Re-arm the LAPIC timer with a new initial count.  Assumes one-shot
 * mode is already configured in the LVT timer entry (fast path used
 * from the reload/restart handlers).
 */
static void
lapic_timer_oneshot_quick(u_int count)
{
	lapic->icr_timer = count;
}
273 
/*
 * Determine a usable LAPIC timer divisor and measure the timer's
 * frequency at that divisor.
 *
 * For each divisor (smallest divide ratio first) a one-shot countdown
 * is started from APIC_TIMER_MAX_COUNT and DELAY() spins for two
 * seconds.  If the counter expired within the window the elapsed
 * count equals APIC_TIMER_MAX_COUNT and the next (larger) divisor is
 * tried.  The first divisor that survives the window is recorded in
 * lapic_timer_divisor_idx (later reused to program the APs) and the
 * frequency is the elapsed tick count halved (two seconds of ticks
 * -> ticks per second).
 */
static void
lapic_timer_calibrate(void)
{
	sysclock_t value;

	/* Try to calibrate the local APIC timer. */
	for (lapic_timer_divisor_idx = 0;
	     lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
	     lapic_timer_divisor_idx++) {
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
		lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
		DELAY(2000000);
		value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
		if (value != APIC_TIMER_MAX_COUNT)
			break;
	}
	if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
		panic("lapic: no proper timer divisor?!\n");
	lapic_cputimer_intr.freq = value / 2;

	kprintf("lapic: divisor index %d, frequency %u Hz\n",
		lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
}
297 
/*
 * Common LAPIC timer interrupt processing for the given cpu: mark the
 * one-shot as no longer armed and run any queued systimers against
 * the current cputimer count.  frame may be NULL when no interrupt
 * frame is available.
 */
static void
lapic_timer_process_oncpu(struct globaldata *gd, struct intrframe *frame)
{
	sysclock_t count;

	/* The one-shot has fired; a reload will re-arm it. */
	gd->gd_timer_running = 0;

	count = sys_cputimer->count();
	if (TAILQ_FIRST(&gd->gd_systimerq) != NULL)
		systimer_intr(&count, 0, frame);
}
309 
/*
 * LAPIC timer interrupt entry point for the current cpu when no
 * interrupt frame is available.
 */
void
lapic_timer_process(void)
{
	lapic_timer_process_oncpu(mycpu, NULL);
}
315 
/*
 * LAPIC timer interrupt entry point for the current cpu, passing the
 * interrupt frame through to the systimer code.
 */
void
lapic_timer_process_frame(struct intrframe *frame)
{
	lapic_timer_process_oncpu(mycpu, frame);
}
321 
322 /*
323  * This manual debugging code is called unconditionally from Xtimer
324  * (the lapic timer interrupt) whether the current thread is in a
325  * critical section or not) and can be useful in tracking down lockups.
326  *
327  * NOTE: MANUAL DEBUG CODE
328  */
329 #if 0
330 static int saveticks[SMP_MAXCPU];
331 static int savecounts[SMP_MAXCPU];
332 #endif
333 
/*
 * Compiled out by default (see NOTE above); when the #if 0 blocks are
 * enabled this paints per-cpu status directly onto the VGA text
 * console and can panic when a cpu's ticks counter stops advancing.
 */
void
lapic_timer_always(struct intrframe *frame)
{
#if 0
	globaldata_t gd = mycpu;
	int cpu = gd->gd_cpuid;
	char buf[64];
	short *gptr;
	int i;

	if (cpu <= 20) {
		gptr = (short *)0xFFFFFFFF800b8000 + 80 * cpu;
		*gptr = ((*gptr + 1) & 0x00FF) | 0x0700;
		++gptr;

		ksnprintf(buf, sizeof(buf), " %p %16s %d %16s ",
		    (void *)frame->if_rip, gd->gd_curthread->td_comm, ticks,
		    gd->gd_infomsg);
		for (i = 0; buf[i]; ++i) {
			gptr[i] = 0x0700 | (unsigned char)buf[i];
		}
	}
#if 0
	if (saveticks[gd->gd_cpuid] != ticks) {
		saveticks[gd->gd_cpuid] = ticks;
		savecounts[gd->gd_cpuid] = 0;
	}
	++savecounts[gd->gd_cpuid];
	if (savecounts[gd->gd_cpuid] > 2000 && panicstr == NULL) {
		panic("cpud %d panicing on ticks failure",
			gd->gd_cpuid);
	}
	for (i = 0; i < ncpus; ++i) {
		int delta;
		if (saveticks[i] && panicstr == NULL) {
			delta = saveticks[i] - ticks;
			if (delta < -10 || delta > 10) {
				panic("cpu %d panicing on cpu %d watchdog",
				      gd->gd_cpuid, i);
			}
		}
	}
#endif
#endif
}
379 
/*
 * cputimer_intr reload hook: arm this cpu's LAPIC timer to fire after
 * 'reload' sys_cputimer ticks.
 */
static void
lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
{
	struct globaldata *gd = mycpu;

	/*
	 * Rescale from sys_cputimer ticks to LAPIC timer ticks using
	 * 64 bit intermediate math, and clamp to a minimum count of 2.
	 */
	reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
	if (reload < 2)
		reload = 2;

	if (gd->gd_timer_running) {
		/*
		 * A one-shot is already armed; only shorten it.  An
		 * earlier deadline must never be pushed further out.
		 */
		if (reload < lapic->ccr_timer)
			lapic_timer_oneshot_quick(reload);
	} else {
		gd->gd_timer_running = 1;
		lapic_timer_oneshot_quick(reload);
	}
}
397 
/*
 * cputimer_intr enable hook: unmask the LAPIC timer LVT entry and
 * force one-shot mode, then apply the AMD C1E fixup on this cpu in
 * case the timer would otherwise be dead (see
 * lapic_timer_fixup_handler()).
 */
static void
lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
{
	uint32_t timer;

	timer = lapic->lvt_timer;
	timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
	lapic->lvt_timer = timer;

	lapic_timer_fixup_handler(NULL);
}
409 
/*
 * Apply the AMD C1E LAPIC-timer fixup on the calling cpu (also used
 * as an IPI handler).
 *
 * arg, if non-NULL, points to an int that is set to 1 when the fixup
 * disabled C1E and kick-started this cpu's timer, 0 otherwise.
 */
static void
lapic_timer_fixup_handler(void *arg)
{
	int *started = arg;

	if (started != NULL)
		*started = 0;

	if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		/*
		 * Detect the presence of C1E capability mostly on latest
		 * dual-cores (or future) k8 family.  This feature renders
		 * the local APIC timer dead, so we disable it by reading
		 * the Interrupt Pending Message register and clearing both
		 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
		 *
		 * Reference:
		 *   "BIOS and Kernel Developer's Guide for AMD NPT
		 *    Family 0Fh Processors"
		 *   #32559 revision 3.00
		 */
		if ((cpu_id & 0x00000f00) == 0x00000f00 &&
		    (cpu_id & 0x0fff0000) >= 0x00040000) {
			uint64_t msr;

			/* MSR 0xc0010055: Interrupt Pending Message reg */
			msr = rdmsr(0xc0010055);
			if (msr & 0x18000000) {
				struct globaldata *gd = mycpu;

				kprintf("cpu%d: AMD C1E detected\n",
					gd->gd_cpuid);
				wrmsr(0xc0010055, msr & ~0x18000000ULL);

				/*
				 * We are kinda stalled;
				 * kick start again.
				 */
				gd->gd_timer_running = 1;
				lapic_timer_oneshot_quick(2);

				if (started != NULL)
					*started = 1;
			}
		}
	}
}
456 
457 static void
458 lapic_timer_restart_handler(void *dummy __unused)
459 {
460 	int started;
461 
462 	lapic_timer_fixup_handler(&started);
463 	if (!started) {
464 		struct globaldata *gd = mycpu;
465 
466 		gd->gd_timer_running = 1;
467 		lapic_timer_oneshot_quick(2);
468 	}
469 }
470 
/*
 * This function is called only by ACPI-CA code currently:
 * - AMD C1E fixup.  AMD C1E only seems to happen after ACPI
 *   module controls PM.  So once ACPI-CA is attached, we try
 *   to apply the fixup to prevent LAPIC timer from hanging.
 *
 * The fixup is broadcast to every active cpu via IPI; the NULL
 * argument means no feedback about whether a kick-start occurred.
 */
static void
lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
{
	lwkt_send_ipiq_mask(smp_active_mask,
			    lapic_timer_fixup_handler, NULL);
}
483 
/*
 * cputimer_intr restart hook: have every active cpu apply the C1E
 * fixup and re-arm its LAPIC timer.
 */
static void
lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
{
	lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
}
489 
490 
/*
 * Dump the interesting local APIC registers of the current cpu
 * (LINT0/LINT1 LVT entries, TPR and SVR) to the console, prefixed
 * with 'str' for context.
 */
void
apic_dump(char* str)
{
	kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
	kprintf("     lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
		lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
}
501 
502 /*
503  * Inter Processor Interrupt functions.
504  */
505 
506 /*
507  * Send APIC IPI 'vector' to 'destType' via 'deliveryMode'.
508  *
509  *  destType is 1 of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
510  *  vector is any valid SYSTEM INT vector
511  *  delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
512  *
513  * A backlog of requests can create a deadlock between cpus.  To avoid this
514  * we have to be able to accept IPIs at the same time we are trying to send
515  * them.  The critical section prevents us from attempting to send additional
516  * IPIs reentrantly, but also prevents IPIQ processing so we have to call
517  * lwkt_process_ipiq() manually.  It's rather messy and expensive for this
518  * to occur but fortunately it does not happen too often.
519  */
int
apic_ipi(int dest_type, int vector, int delivery_mode)
{
	u_long  icr_lo;

	crit_enter();
	/*
	 * If a previous IPI is still pending delivery, enable
	 * interrupts and drain our own incoming IPIQs while waiting
	 * to avoid the cross-cpu deadlock described above.
	 */
	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
	    unsigned long rflags = read_rflags();
	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("apic_ipi");
	    while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		lwkt_process_ipiq();
	    }
	    DEBUG_POP_INFO();
	    write_rflags(rflags);
	}

	/* Preserve reserved bits; the ICR low write fires the IPI. */
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
		delivery_mode | vector;
	lapic->icr_lo = icr_lo;
	crit_exit();
	return 0;
}
543 
/*
 * Send an IPI with 'vector' via 'delivery_mode' to the single target
 * 'cpu'.  Uses the same anti-deadlock wait/drain strategy as
 * apic_ipi().
 */
void
single_apic_ipi(int cpu, int vector, int delivery_mode)
{
	u_long  icr_lo;
	u_long  icr_hi;

	crit_enter();
	/* Wait out any pending IPI, draining our own IPIQs meanwhile. */
	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
	    unsigned long rflags = read_rflags();
	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("single_apic_ipi");
	    while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		lwkt_process_ipiq();
	    }
	    DEBUG_POP_INFO();
	    write_rflags(rflags);
	}
	/* Target the destination cpu's APIC ID in ICR high. */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPU_TO_ID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/* build ICR_LOW */
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK)
	    | APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR; this write fires the IPI */
	lapic->icr_lo = icr_lo;
	crit_exit();
}
573 
574 #if 0
575 
/*
 * Returns 0 if the apic is busy, 1 if we were able to queue the request.
 *
 * NOT WORKING YET!  The code as-is may end up not queueing an IPI at all
 * to the target, and the scheduler does not 'poll' for IPI messages.
 */
int
single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
{
	u_long  icr_lo;
	u_long  icr_hi;

	crit_enter();
	/* Refuse rather than spin if the previous IPI is still pending. */
	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
	    crit_exit();
	    return(0);
	}
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPU_TO_ID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/*
	 * build ICR_LOW
	 *
	 * NOTE(review): this dead code masks with APIC_RESV2_MASK while
	 * the live single_apic_ipi() uses APIC_ICRLO_RESV_MASK -- confirm
	 * which is intended before resurrecting this path.
	 */
	icr_lo = (lapic->icr_lo & APIC_RESV2_MASK)
	    | APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR */
	lapic->icr_lo = icr_lo;
	crit_exit();
	return(1);
}
606 
607 #endif
608 
609 /*
610  * Send APIC IPI 'vector' to 'target's via 'delivery_mode'.
611  *
612  * target is a bitmask of destination cpus.  Vector is any
613  * valid system INT vector.  Delivery mode may be either
614  * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
615  */
616 void
617 selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
618 {
619 	crit_enter();
620 	while (target) {
621 		int n = BSFCPUMASK(target);
622 		target &= ~CPUMASK(n);
623 		single_apic_ipi(n, vector, delivery_mode);
624 	}
625 	crit_exit();
626 }
627 
628 /*
629  * Timer code, in development...
630  *  - suggested by rgrimes@gndrsh.aac.dev.com
631  */
632 int
633 get_apic_timer_frequency(void)
634 {
635 	return(lapic_cputimer_intr.freq);
636 }
637 
638 /*
639  * Load a 'downcount time' in uSeconds.
640  */
641 void
642 set_apic_timer(int us)
643 {
644 	u_int count;
645 
646 	/*
647 	 * When we reach here, lapic timer's frequency
648 	 * must have been calculated as well as the
649 	 * divisor (lapic->dcr_timer is setup during the
650 	 * divisor calculation).
651 	 */
652 	KKASSERT(lapic_cputimer_intr.freq != 0 &&
653 		 lapic_timer_divisor_idx >= 0);
654 
655 	count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
656 	lapic_timer_oneshot(count);
657 }
658 
659 
660 /*
661  * Read remaining time in timer.
662  */
int
read_apic_timer(void)
{
#if 0
	/** XXX FIXME: we need to return the actual remaining time,
         *         for now we just return the remaining count.
         */
#else
	/* Current-count register: ticks remaining in the one-shot. */
	return lapic->ccr_timer;
#endif
}
674 
675 
676 /*
677  * Spin-style delay, set delay time in uS, spin till it drains.
678  */
void
u_sleep(int count)
{
	set_apic_timer(count);
	/* Busy-wait until the one-shot drains to zero. */
	while (read_apic_timer())
		 /* spin */ ;
}
686 
687 int
688 lapic_unused_apic_id(int start)
689 {
690 	int i;
691 
692 	for (i = start; i < NAPICID; ++i) {
693 		if (ID_TO_CPU(i) == -1)
694 			return i;
695 	}
696 	return NAPICID;
697 }
698 
/*
 * Map the LAPIC register page at physical address 'lapic_addr'
 * uncacheable into kernel virtual memory and point the global
 * 'lapic' pointer at it.
 */
void
lapic_map(vm_offset_t lapic_addr)
{
	lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));

	kprintf("lapic: at 0x%08lx\n", lapic_addr);
}
706 
707 static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
708 	TAILQ_HEAD_INITIALIZER(lapic_enumerators);
709 
/*
 * Probe the registered LAPIC enumerators in priority order and let
 * the first one that succeeds enumerate the LAPICs.  Panics if no
 * enumerator claims the hardware.
 */
void
lapic_config(void)
{
	struct lapic_enumerator *e;
	int error, i;

	/* Start with no APIC ID mapped to any cpu. */
	for (i = 0; i < NAPICID; ++i)
		ID_TO_CPU(i) = -1;

	/* The list is kept priority-sorted; first successful probe wins. */
	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
		error = e->lapic_probe(e);
		if (!error)
			break;
	}
	if (e == NULL)
		panic("can't config lapic\n");

	e->lapic_enumerate(e);
}
729 
730 void
731 lapic_enumerator_register(struct lapic_enumerator *ne)
732 {
733 	struct lapic_enumerator *e;
734 
735 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
736 		if (e->lapic_prio < ne->lapic_prio) {
737 			TAILQ_INSERT_BEFORE(e, ne, lapic_link);
738 			return;
739 		}
740 	}
741 	TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
742 }
743