xref: /minix3/minix/kernel/arch/earm/arch_clock.c (revision f12160c14d106d9bd9a65646756b1a7f187794ab)
/* ARM-specific clock functions. */

#include "kernel/kernel.h"

#include "kernel/clock.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
#include <minix/board.h>
#include "kernel/glo.h"
#include "kernel/profile.h"

#include <sys/sched.h> /* for CP_*, CPUSTATES */
#if CPUSTATES != MINIX_CPUSTATES
/* If this breaks, the code in this file may have to be adapted accordingly. */
#error "MINIX_CPUSTATES value is out of sync with NetBSD's!"
#endif

#include "kernel/spinlock.h"

#ifdef CONFIG_SMP
#include "kernel/smp.h"
#error CONFIG_SMP is unsupported on ARM
#endif

#include "bsp_timer.h"
#include "bsp_intr.h"

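/*
 * Per-CPU accounting constants and counters.  tsc_per_ms[] holds the number
 * of free-running counter ("TSC") cycles per millisecond, tsc_per_tick[] the
 * number of cycles per clock tick, and tsc_per_state[] accumulates the cycles
 * spent in each CPU state (CP_USER, CP_NICE, CP_SYS, CP_INTR, CP_IDLE) for
 * reporting by get_cpu_ticks() below.
 */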
static unsigned tsc_per_ms[CONFIG_MAX_CPUS];
static unsigned tsc_per_tick[CONFIG_MAX_CPUS];
static uint64_t tsc_per_state[CONFIG_MAX_CPUS][CPUSTATES];

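/*
 * Initialize the local timer through the board support package, and set up
 * the cycles-per-millisecond and cycles-per-tick constants for this board.
 * The tsc_per_ms values are hardcoded calibration constants; e.g., with
 * tsc_per_ms[0] = 15000 and system_hz = 100, tsc_per_tick[0] works out to
 * 15000 * 1000 / 100 = 150000 cycles per clock tick.
 */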
int init_local_timer(unsigned freq)
{
	bsp_timer_init(freq);

	if (BOARD_IS_BBXM(machine.board_id)) {
		tsc_per_ms[0] = 16250;
	} else if (BOARD_IS_BB(machine.board_id)) {
		tsc_per_ms[0] = 15000;
	} else {
		panic("Cannot do the clock setup. machine (0x%08x) is unknown\n",
		    machine.board_id);
	}

	tsc_per_tick[0] = tsc_per_ms[0] * 1000 / system_hz;

	return 0;
}

void stop_local_timer(void)
{
	bsp_timer_stop();
}

void arch_timer_int_handler(void)
{
	bsp_timer_int_handler();
}

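/*
 * Prime the cycle accounting for this CPU: record the current counter value
 * as the last context-switch timestamp, and clear the bookkeeping used by
 * cpu_load() below.
 */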
void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	get_cpu_var(cpu, cpu_last_tsc) = 0;
	get_cpu_var(cpu, cpu_last_idle) = 0;
}

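/*
 * Called when the current process is descheduled.  Charge the cycles spent
 * since the last context switch to the process, to any pending kernel IPC or
 * kernel-call billing, to the per-process CPU average, to the process'
 * remaining quantum, and to the per-CPU state counters.
 */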
void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
	unsigned int cpu, tpt, counter;

#ifdef CONFIG_SMP
#error CONFIG_SMP is unsupported on ARM
#else
	read_tsc_64(&tsc);
	p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
	cpu = 0;
#endif

	tsc_delta = tsc - *__tsc_ctr_switch;

	if (kbill_ipc) {
		kbill_ipc->p_kipc_cycles += tsc_delta;
		kbill_ipc = NULL;
	}

	if (kbill_kcall) {
		kbill_kcall->p_kcall_cycles += tsc_delta;
		kbill_kcall = NULL;
	}

	/*
	 * Perform CPU average accounting here, rather than in the generic
	 * clock handler.  Doing it here offers two advantages: 1) we can
	 * account for time spent in the kernel, and 2) we properly account for
	 * CPU time spent by a process that has a lot of short-lasting activity
	 * such that it spends serious CPU time but never actually runs when a
	 * clock tick triggers.  Note that clock speed inaccuracy requires the
	 * code below to be a loop, but in the vast majority of cases the loop
	 * executes at most once, and is often skipped entirely.
	 */
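	/*
	 * For example (illustrative numbers only): with tsc_per_tick[cpu] =
	 * 150000, a process that has accumulated 450000 cycles since the last
	 * tick boundary has cpuavg_increment() applied three times before the
	 * loop below exits.
	 */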
	tpt = tsc_per_tick[cpu];

	p->p_tick_cycles += tsc_delta;
	while (tpt > 0 && p->p_tick_cycles >= tpt) {
		p->p_tick_cycles -= tpt;

		/*
		 * The process has spent roughly a whole clock tick worth of
		 * CPU cycles.  Update its per-process CPU utilization counter.
		 * Some of the cycles may actually have been spent in a
		 * previous second, but that is not a problem.
		 */
		cpuavg_increment(&p->p_cpuavg, kclockinfo.uptime, system_hz);
	}

	/*
	 * Deduct the just-consumed CPU cycles from the CPU time left for this
	 * process during its current quantum.  Skip IDLE and other pseudo
	 * kernel tasks, except for global accounting purposes.
	 */
	if (p->p_endpoint >= 0) {
		/* On MINIX3, the "system" counter covers system processes. */
		if (p->p_priv != priv_addr(USER_PRIV_ID))
			counter = CP_SYS;
		else if (p->p_misc_flags & MF_NICED)
			counter = CP_NICE;
		else
			counter = CP_USER;

#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		if (tsc_delta < p->p_cpu_time_left) {
			p->p_cpu_time_left -= tsc_delta;
		} else {
			p->p_cpu_time_left = 0;
		}
#endif
	} else {
		/* On MINIX3, the "interrupts" counter covers the kernel. */
		if (p->p_endpoint == IDLE)
			counter = CP_IDLE;
		else
			counter = CP_INTR;
	}

	tsc_per_state[cpu][counter] += tsc_delta;

	*__tsc_ctr_switch = tsc;
}

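/*
 * Like context_stop(), but for the case where the idle process is
 * interrupted: charge the cycles to the per-CPU idle process and, if the CPU
 * was idle, restart the local timer.
 */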
void context_stop_idle(void)
{
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}

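/* Nothing to do here on ARM. */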
void restart_local_timer(void)
{
}

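/* Register the timer interrupt handler with the board support package. */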
int register_local_timer_handler(const irq_handler_t handler)
{
	return bsp_register_timer_handler(handler);
}

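/* Convert a time span in milliseconds to a number of counter cycles. */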
u64_t ms_2_cpu_time(unsigned ms)
{
	return (u64_t)tsc_per_ms[cpuid] * ms;
}

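/* Convert a number of counter cycles to a time span in milliseconds. */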
unsigned cpu_time_2_ms(u64_t cpu_time)
{
	return (unsigned long)(cpu_time / tsc_per_ms[cpuid]);
}

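/*
 * Return the CPU load as a percentage over the interval since the previous
 * invocation: load = 100 * (total cycles - idle cycles) / total cycles,
 * clamped to 100.  The first call returns 0.
 */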
short cpu_load(void)
{
	u64_t current_tsc, *current_idle;
	u64_t tsc_delta, idle_delta, busy;
	struct proc *idle;
	short load;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	u64_t *last_tsc, *last_idle;

	last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
	last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);

	idle = get_cpu_var_ptr(cpu, idle_proc);
	read_tsc_64(&current_tsc);
	current_idle = &idle->p_cycles; /* ptr to idle proc's cycle count */

	/* calculate load since last cpu_load invocation */
	if (*last_tsc) {
		tsc_delta = current_tsc - *last_tsc;
		idle_delta = *current_idle - *last_idle;

		busy = tsc_delta - idle_delta;
		busy = busy * 100;
		load = ex64lo(busy / tsc_delta);

		if (load > 100)
			load = 100;
	} else
		load = 0;

	*last_tsc = current_tsc;
	*last_idle = *current_idle;
	return load;
}

/*
 * Return the number of clock ticks spent in each of a predefined number of
 * CPU states.
 */
void
get_cpu_ticks(unsigned int cpu, uint64_t ticks[CPUSTATES])
{
	int i;

	/* TODO: make this inter-CPU safe! */
	for (i = 0; i < CPUSTATES; i++)
		ticks[i] = tsc_per_state[cpu][i] / tsc_per_tick[cpu];
}