1433d6423SLionel Sambuc /* i386-specific clock functions. */
2433d6423SLionel Sambuc
3433d6423SLionel Sambuc #include <machine/ports.h>
4433d6423SLionel Sambuc
5433d6423SLionel Sambuc #include "kernel/clock.h"
6433d6423SLionel Sambuc #include "kernel/interrupt.h"
7433d6423SLionel Sambuc #include <minix/u64.h>
8433d6423SLionel Sambuc
9366d18b2SDavid van Moolenbroek #include <sys/sched.h> /* for CP_*, CPUSTATES */
10366d18b2SDavid van Moolenbroek #if CPUSTATES != MINIX_CPUSTATES
11366d18b2SDavid van Moolenbroek /* If this breaks, the code in this file may have to be adapted accordingly. */
12366d18b2SDavid van Moolenbroek #error "MINIX_CPUSTATES value is out of sync with NetBSD's!"
13366d18b2SDavid van Moolenbroek #endif
14433d6423SLionel Sambuc
15433d6423SLionel Sambuc #ifdef USE_APIC
16433d6423SLionel Sambuc #include "apic.h"
17433d6423SLionel Sambuc #endif
18433d6423SLionel Sambuc
19433d6423SLionel Sambuc #include "kernel/spinlock.h"
20433d6423SLionel Sambuc
21433d6423SLionel Sambuc #ifdef CONFIG_SMP
22433d6423SLionel Sambuc #include "kernel/smp.h"
23433d6423SLionel Sambuc #endif
24433d6423SLionel Sambuc
25433d6423SLionel Sambuc #define CLOCK_ACK_BIT 0x80 /* PS/2 clock interrupt acknowledge bit */
26433d6423SLionel Sambuc
27433d6423SLionel Sambuc /* Clock parameters. */
28433d6423SLionel Sambuc #define COUNTER_FREQ (2*TIMER_FREQ) /* counter frequency using square wave */
29433d6423SLionel Sambuc #define LATCH_COUNT 0x00 /* cc00xxxx, c = channel, x = any */
30433d6423SLionel Sambuc #define SQUARE_WAVE 0x36 /* ccaammmb, a = access, m = mode, b = BCD */
31433d6423SLionel Sambuc /* 11x11, 11 = LSB then MSB, x11 = sq wave */
32433d6423SLionel Sambuc #define TIMER_FREQ 1193182 /* clock frequency for timer in PC and AT */
33433d6423SLionel Sambuc #define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter*/
34433d6423SLionel Sambuc
35433d6423SLionel Sambuc static irq_hook_t pic_timer_hook; /* interrupt handler hook */
36433d6423SLionel Sambuc
37433d6423SLionel Sambuc static unsigned probe_ticks;
38433d6423SLionel Sambuc static u64_t tsc0, tsc1;
39433d6423SLionel Sambuc #define PROBE_TICKS (system_hz / 10)
40433d6423SLionel Sambuc
41433d6423SLionel Sambuc static unsigned tsc_per_ms[CONFIG_MAX_CPUS];
42366d18b2SDavid van Moolenbroek static unsigned tsc_per_tick[CONFIG_MAX_CPUS];
43366d18b2SDavid van Moolenbroek static uint64_t tsc_per_state[CONFIG_MAX_CPUS][CPUSTATES];
44433d6423SLionel Sambuc
/*===========================================================================*
 *				init_8253A_timer			     *
 *===========================================================================*/
init_8253A_timer(const unsigned freq)48433d6423SLionel Sambuc int init_8253A_timer(const unsigned freq)
49433d6423SLionel Sambuc {
50433d6423SLionel Sambuc /* Initialize channel 0 of the 8253A timer to, e.g., 60 Hz,
51433d6423SLionel Sambuc * and register the CLOCK task's interrupt handler to be run
52433d6423SLionel Sambuc * on every clock tick.
53433d6423SLionel Sambuc */
54433d6423SLionel Sambuc outb(TIMER_MODE, SQUARE_WAVE); /* run continuously */
55433d6423SLionel Sambuc outb(TIMER0, (TIMER_COUNT(freq) & 0xff)); /* timer low byte */
56433d6423SLionel Sambuc outb(TIMER0, TIMER_COUNT(freq) >> 8); /* timer high byte */
57433d6423SLionel Sambuc
58433d6423SLionel Sambuc return OK;
59433d6423SLionel Sambuc }
60433d6423SLionel Sambuc
/*===========================================================================*
 *				stop_8253A_timer			     *
 *===========================================================================*/
stop_8253A_timer(void)64433d6423SLionel Sambuc void stop_8253A_timer(void)
65433d6423SLionel Sambuc {
66433d6423SLionel Sambuc /* Reset the clock to the BIOS rate. (For rebooting.) */
67433d6423SLionel Sambuc outb(TIMER_MODE, 0x36);
68433d6423SLionel Sambuc outb(TIMER0, 0);
69433d6423SLionel Sambuc outb(TIMER0, 0);
70433d6423SLionel Sambuc }
71433d6423SLionel Sambuc
void arch_timer_int_handler(void)
{
	/* Architecture hook run on every timer interrupt.  Nothing to do
	 * here on i386; presumably interrupt acknowledgement is handled
	 * by the PIC/APIC code — the empty hook is kept so that
	 * arch-independent clock code can call it unconditionally. */
}
75433d6423SLionel Sambuc
/*
 * Temporary IRQ 0 handler used while calibrating the CPU clock: it counts
 * ticks and snapshots the TSC on the first and the PROBE_TICKS-th tick, so
 * that estimate_cpu_freq() can compute cycles per tick from tsc1 - tsc0.
 * Always returns 1 (keep the interrupt enabled).
 */
static int calib_cpu_handler(irq_hook_t * UNUSED(hook))
{
	u64_t tsc;

	probe_ticks++;
	read_tsc_64(&tsc);


	if (probe_ticks == 1) {
		tsc0 = tsc;
	}
	else if (probe_ticks == PROBE_TICKS) {
		tsc1 = tsc;
	}

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	return 1;
}
95433d6423SLionel Sambuc
/*
 * Estimate the CPU clock frequency by counting TSC cycles over a fixed
 * number (PROBE_TICKS) of legacy PIC timer ticks.  Temporarily installs
 * calib_cpu_handler() on IRQ 0, busy-waits with interrupts enabled until
 * enough ticks have been sampled, then derives cycles/second and records
 * the result for the current CPU.
 */
static void estimate_cpu_freq(void)
{
	u64_t tsc_delta;
	u64_t cpu_freq;

	irq_hook_t calib_cpu;

	/* set the probe, we use the legacy timer, IRQ 0 */
	put_irq_handler(&calib_cpu, CLOCK_IRQ, calib_cpu_handler);

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	/* set the PIC timer to get some time */
	intr_enable();

	/* loop for some time to get a sample */
	while(probe_ticks < PROBE_TICKS) {
		intr_enable();
	}

	intr_disable();
	/* just in case we are in an SMP single cpu fallback mode */
	BKL_LOCK();

	/* remove the probe */
	rm_irq_handler(&calib_cpu);

	tsc_delta = tsc1 - tsc0;

	/* tsc_delta spans PROBE_TICKS - 1 tick intervals; scale the
	 * per-tick cycle count by the tick rate to get cycles/second. */
	cpu_freq = (tsc_delta / (PROBE_TICKS - 1)) * system_hz;
	cpu_set_freq(cpuid, cpu_freq);
	/* cpu_info stores the frequency in MHz. */
	cpu_info[cpuid].freq = (unsigned long)(cpu_freq / 1000000);
	BOOT_VERBOSE(cpu_print_freq(cpuid));
}
130433d6423SLionel Sambuc
init_local_timer(unsigned freq)131433d6423SLionel Sambuc int init_local_timer(unsigned freq)
132433d6423SLionel Sambuc {
133433d6423SLionel Sambuc #ifdef USE_APIC
134433d6423SLionel Sambuc /* if we know the address, lapic is enabled and we should use it */
135433d6423SLionel Sambuc if (lapic_addr) {
136433d6423SLionel Sambuc unsigned cpu = cpuid;
137366d18b2SDavid van Moolenbroek tsc_per_ms[cpu] = (unsigned)(cpu_get_freq(cpu) / 1000);
138366d18b2SDavid van Moolenbroek tsc_per_tick[cpu] = (unsigned)(cpu_get_freq(cpu) / system_hz);
139433d6423SLionel Sambuc lapic_set_timer_one_shot(1000000 / system_hz);
140433d6423SLionel Sambuc } else {
141da9af514SLionel Sambuc DEBUGBASIC(("Initiating legacy i8253 timer\n"));
142433d6423SLionel Sambuc #else
143433d6423SLionel Sambuc {
144433d6423SLionel Sambuc #endif
145433d6423SLionel Sambuc init_8253A_timer(freq);
146433d6423SLionel Sambuc estimate_cpu_freq();
147433d6423SLionel Sambuc /* always only 1 cpu in the system */
148433d6423SLionel Sambuc tsc_per_ms[0] = (unsigned long)(cpu_get_freq(0) / 1000);
149366d18b2SDavid van Moolenbroek tsc_per_tick[0] = (unsigned)(cpu_get_freq(0) / system_hz);
150433d6423SLionel Sambuc }
151433d6423SLionel Sambuc
152433d6423SLionel Sambuc return 0;
153433d6423SLionel Sambuc }
154433d6423SLionel Sambuc
void stop_local_timer(void)
{
	/* Stop this CPU's tick source: the LAPIC timer when in use (also
	 * acknowledging any pending interrupt via apic_eoi), or the legacy
	 * 8253A PIT otherwise. */
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_stop_timer();
		apic_eoi();
	} else
#endif
	{
		stop_8253A_timer();
	}
}
167433d6423SLionel Sambuc
void restart_local_timer(void)
{
	/* Restart the local tick source.  Only the one-shot LAPIC timer
	 * needs an explicit restart; the PIT is programmed to run
	 * continuously, so the non-APIC case is a no-op. */
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_restart_timer();
	}
#endif
}
176433d6423SLionel Sambuc
/*
 * Register 'handler' as this CPU's clock tick handler.  With the LAPIC
 * timer there is nothing to do here (the handler is wired up in
 * apic_idt_init()); otherwise hook it onto the legacy PIC clock IRQ.
 * Always returns 0.
 */
int register_local_timer_handler(const irq_handler_t handler)
{
#ifdef USE_APIC
	if (lapic_addr) {
		/* Using APIC, it is configured in apic_idt_init() */
		BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
	} else
#endif
	{
		/* Using PIC, Initialize the CLOCK's interrupt hook. */
		pic_timer_hook.proc_nr_e = NONE;
		pic_timer_hook.irq = CLOCK_IRQ;

		put_irq_handler(&pic_timer_hook, CLOCK_IRQ, handler);
	}

	return 0;
}
195433d6423SLionel Sambuc
void cycles_accounting_init(void)
{
	/* Initialize per-CPU cycle accounting: record the current TSC as
	 * the last context-switch timestamp and clear the counters used
	 * by cpu_load(). */
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	get_cpu_var(cpu, cpu_last_tsc) = 0;
	get_cpu_var(cpu, cpu_last_idle) = 0;
}
207433d6423SLionel Sambuc
/*
 * Account the CPU cycles spent since the last context switch to process 'p'
 * (which may also be the KERNEL or IDLE pseudo-process).  This covers:
 * per-process cycle totals, BKL contention statistics (SMP), kernel
 * IPC/kcall billing, per-process CPU-average accounting, quantum
 * bookkeeping, and the per-CPU-state counters read by get_cpu_ticks().
 * Finally, the per-CPU switch timestamp is reset to "now".
 */
void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
	unsigned int cpu, tpt, counter;
#ifdef CONFIG_SMP
	int must_bkl_unlock = 0;

	cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If account
	 * for IDLE we must not hold the lock
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = tsc - *__tsc_ctr_switch;
		kernel_ticks[cpu] = kernel_ticks[cpu] + tmp;
		p->p_cycles = p->p_cycles + tmp;
		must_bkl_unlock = 1;
	} else {
		u64_t bkl_tsc;
		atomic_t succ;

		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;

		BKL_LOCK();

		read_tsc_64(&tsc);

		/* Time between the two TSC reads was spent acquiring the
		 * BKL; count it as lock overhead, and count how often the
		 * lock appeared free when we first looked (succ == 0). */
		bkl_ticks[cpu] = bkl_ticks[cpu] + tsc - bkl_tsc;
		bkl_tries[cpu]++;
		bkl_succ[cpu] += !(!(succ == 0));

		p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;

#ifdef CONFIG_SMP
		/*
		 * Since at the time we got a scheduling IPI we might have been
		 * waiting for BKL already, we may miss it due to a similar IPI to
		 * the cpu which is already waiting for us to handle its. This
		 * results in a live-lock of these two cpus.
		 *
		 * Therefore we always check if there is one pending and if so,
		 * we handle it straight away so the other cpu can continue and
		 * we do not deadlock.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
	cpu = 0;
#endif

	tsc_delta = tsc - *__tsc_ctr_switch;

	/* If this stretch of kernel time was on behalf of a process's IPC
	 * or kernel call, bill the cycles to that process as well. */
	if (kbill_ipc) {
		kbill_ipc->p_kipc_cycles += tsc_delta;
		kbill_ipc = NULL;
	}

	if (kbill_kcall) {
		kbill_kcall->p_kcall_cycles += tsc_delta;
		kbill_kcall = NULL;
	}

	/*
	 * Perform CPU average accounting here, rather than in the generic
	 * clock handler. Doing it here offers two advantages: 1) we can
	 * account for time spent in the kernel, and 2) we properly account for
	 * CPU time spent by a process that has a lot of short-lasting activity
	 * such that it spends serious CPU time but never actually runs when a
	 * clock tick triggers. Note that clock speed inaccuracy requires that
	 * the code below is a loop, but the loop will in by far most cases not
	 * be executed more than once, and often be skipped at all.
	 */
	tpt = tsc_per_tick[cpu];

	p->p_tick_cycles += tsc_delta;
	while (tpt > 0 && p->p_tick_cycles >= tpt) {
		p->p_tick_cycles -= tpt;

		/*
		 * The process has spent roughly a whole clock tick worth of
		 * CPU cycles. Update its per-process CPU utilization counter.
		 * Some of the cycles may actually have been spent in a
		 * previous second, but that is not a problem.
		 */
		cpuavg_increment(&p->p_cpuavg, kclockinfo.uptime, system_hz);
	}

	/*
	 * deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks, except for global accounting purposes.
	 */
	if (p->p_endpoint >= 0) {
		/* On MINIX3, the "system" counter covers system processes. */
		if (p->p_priv != priv_addr(USER_PRIV_ID))
			counter = CP_SYS;
		else if (p->p_misc_flags & MF_NICED)
			counter = CP_NICE;
		else
			counter = CP_USER;

#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		if (tsc_delta < p->p_cpu_time_left) {
			p->p_cpu_time_left -= tsc_delta;
		} else {
			p->p_cpu_time_left = 0;
		}
#endif
	} else {
		/* On MINIX3, the "interrupts" counter covers the kernel. */
		if (p->p_endpoint == IDLE)
			counter = CP_IDLE;
		else
			counter = CP_INTR;
	}

	tsc_per_state[cpu][counter] += tsc_delta;

	*__tsc_ctr_switch = tsc;

#ifdef CONFIG_SMP
	if(must_bkl_unlock) {
		BKL_UNLOCK();
	}
#endif
}
350433d6423SLionel Sambuc
void context_stop_idle(void)
{
	/* Called when an interrupt ends an idle period: charge the idle
	 * cycles to this CPU's idle process and, if we were truly idle,
	 * restart the local timer (relevant for the LAPIC tick source). */
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	/* Let the statistical profiler know the sample interrupted idle. */
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}
370433d6423SLionel Sambuc
371433d6423SLionel Sambuc u64_t ms_2_cpu_time(unsigned ms)
372433d6423SLionel Sambuc {
373433d6423SLionel Sambuc return (u64_t)tsc_per_ms[cpuid] * ms;
374433d6423SLionel Sambuc }
375433d6423SLionel Sambuc
376433d6423SLionel Sambuc unsigned cpu_time_2_ms(u64_t cpu_time)
377433d6423SLionel Sambuc {
378433d6423SLionel Sambuc return (unsigned long)(cpu_time / tsc_per_ms[cpuid]);
379433d6423SLionel Sambuc }
380433d6423SLionel Sambuc
381433d6423SLionel Sambuc short cpu_load(void)
382433d6423SLionel Sambuc {
383433d6423SLionel Sambuc u64_t current_tsc, *current_idle;
384433d6423SLionel Sambuc u64_t tsc_delta, idle_delta, busy;
385433d6423SLionel Sambuc struct proc *idle;
386433d6423SLionel Sambuc short load;
387433d6423SLionel Sambuc #ifdef CONFIG_SMP
388433d6423SLionel Sambuc unsigned cpu = cpuid;
389433d6423SLionel Sambuc #endif
390433d6423SLionel Sambuc
391433d6423SLionel Sambuc u64_t *last_tsc, *last_idle;
392433d6423SLionel Sambuc
393433d6423SLionel Sambuc last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
394433d6423SLionel Sambuc last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);
395433d6423SLionel Sambuc
396433d6423SLionel Sambuc idle = get_cpu_var_ptr(cpu, idle_proc);;
397433d6423SLionel Sambuc read_tsc_64(¤t_tsc);
398433d6423SLionel Sambuc current_idle = &idle->p_cycles; /* ptr to idle proc */
399433d6423SLionel Sambuc
400433d6423SLionel Sambuc /* calculate load since last cpu_load invocation */
401433d6423SLionel Sambuc if (*last_tsc) {
402433d6423SLionel Sambuc tsc_delta = current_tsc - *last_tsc;
403433d6423SLionel Sambuc idle_delta = *current_idle - *last_idle;
404433d6423SLionel Sambuc
405433d6423SLionel Sambuc busy = tsc_delta - idle_delta;
406433d6423SLionel Sambuc busy = busy * 100;
407433d6423SLionel Sambuc load = ex64lo(busy / tsc_delta);
408433d6423SLionel Sambuc
409433d6423SLionel Sambuc if (load > 100)
410433d6423SLionel Sambuc load = 100;
411433d6423SLionel Sambuc } else
412433d6423SLionel Sambuc load = 0;
413433d6423SLionel Sambuc
414433d6423SLionel Sambuc *last_tsc = current_tsc;
415433d6423SLionel Sambuc *last_idle = *current_idle;
416433d6423SLionel Sambuc return load;
417433d6423SLionel Sambuc }
418433d6423SLionel Sambuc
419433d6423SLionel Sambuc void busy_delay_ms(int ms)
420433d6423SLionel Sambuc {
421433d6423SLionel Sambuc u64_t cycles = ms_2_cpu_time(ms), tsc0, tsc, tsc1;
422433d6423SLionel Sambuc read_tsc_64(&tsc0);
423433d6423SLionel Sambuc tsc1 = tsc0 + cycles;
424433d6423SLionel Sambuc do { read_tsc_64(&tsc); } while(tsc < tsc1);
425433d6423SLionel Sambuc return;
426433d6423SLionel Sambuc }
427433d6423SLionel Sambuc
/*
 * Return the number of clock ticks spent in each of a predefined number of
 * CPU states.
 *
 * Converts the per-state TSC cycle counters into tick units by dividing by
 * this CPU's cycles-per-tick factor.
 * NOTE(review): tsc_per_tick[cpu] is zero until init_local_timer() has run
 * for that CPU, which would make this divide by zero — confirm callers only
 * invoke this after timer initialization.
 */
void
get_cpu_ticks(unsigned int cpu, uint64_t ticks[CPUSTATES])
{
	int i;

	/* TODO: make this inter-CPU safe! */
	for (i = 0; i < CPUSTATES; i++)
		ticks[i] = tsc_per_state[cpu][i] / tsc_per_tick[cpu];
}
441