/*	$OpenBSD: cpu.c,v 1.77 2024/03/29 21:26:38 miod Exp $	*/
/*	$NetBSD: cpu.c,v 1.13 2001/05/26 21:27:15 chs Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.c	8.5 (Berkeley) 11/23/93
 *
 */

#include <sys/param.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/hypervisor.h>
#include <machine/openfirm.h>
#include <machine/pmap.h>
#include <machine/sparc64.h>

#include <sparc64/sparc64/cache.h>

#include <sparc64/dev/starfire.h>

struct cacheinfo cacheinfo = {
	.c_dcache_flush_page = us_dcache_flush_page
};

void (*cpu_start_clock)(void);

/* Linked list of all CPUs in system. */
struct cpu_info *cpus = NULL;

struct cpu_info *alloc_cpuinfo(struct mainbus_attach_args *);

/* The following are used externally (sysctl_hw). */
char	machine[] = MACHINE;	/* from <machine/param.h> */
char	cpu_model[100];

/* The CPU configuration driver. */
int	cpu_match(struct device *, void *, void *);
void	cpu_attach(struct device *, struct device *, void *);

const struct cfattach cpu_ca = {
	sizeof(struct device), cpu_match, cpu_attach
};

void	cpu_init(struct cpu_info *ci);
void	cpu_hatch(void);

int	sparc64_cpuspeed(int *);

int	hummingbird_div(uint64_t);
uint64_t hummingbird_estar_mode(int);
void	hummingbird_enable_self_refresh(void);
void	hummingbird_disable_self_refresh(void);
void	hummingbird_set_refresh_count(int, int);
void	hummingbird_setperf(int);
void	hummingbird_init(struct cpu_info *ci);

#define	IU_IMPL(v)	((((u_int64_t)(v)) & VER_IMPL) >> VER_IMPL_SHIFT)
#define	IU_VERS(v)	((((u_int64_t)(v)) & VER_MASK) >> VER_MASK_SHIFT)
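/* Extract the implementation and mask version fields of the VER register. */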

/* virtual address allocation mode for struct cpu_info */
struct kmem_va_mode kv_cpu_info = {
	.kv_map = &kernel_map,
	.kv_align = 8 * PAGE_SIZE
};

struct cpu_info *
alloc_cpuinfo(struct mainbus_attach_args *ma)
{
	paddr_t pa0, pa;
	vaddr_t va, va0;
	vsize_t sz = 8 * PAGE_SIZE;
	int portid;
	struct cpu_info *cpi, *ci;
	extern paddr_t cpu0paddr;

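	/*
	 * Figure out this CPU's interconnect ID.  Depending on the
	 * OpenPROM version it is exported as "upa-portid", "portid"
	 * or "cpuid"; as a last resort, recover it from the upper
	 * bits of the first "reg" entry's physical address.
	 */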
	portid = getpropint(ma->ma_node, "upa-portid", -1);
	if (portid == -1)
		portid = getpropint(ma->ma_node, "portid", -1);
	if (portid == -1)
		portid = getpropint(ma->ma_node, "cpuid", -1);
	if (portid == -1 && ma->ma_nreg > 0)
		portid = (ma->ma_reg[0].ur_paddr >> 32) & 0x0fffffff;
	if (portid == -1)
		panic("alloc_cpuinfo: portid");

	for (cpi = cpus; cpi != NULL; cpi = cpi->ci_next)
		if (cpi->ci_upaid == portid)
			return cpi;

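	/* Allocate the 8 page per-CPU region and back it with fresh pages. */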
	va = (vaddr_t)km_alloc(sz, &kv_cpu_info, &kp_none, &kd_nowait);
	if (va == 0)
		panic("alloc_cpuinfo: no virtual space");
	va0 = va;

	pa0 = cpu0paddr;
	cpu0paddr += sz;

	for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);

	pmap_update(pmap_kernel());

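	/*
	 * The cpu_info lives at the same offset within this region as
	 * CPUINFO_VA lies above INTSTACK in the boot CPU's mapping.
	 */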
	cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK);

	memset((void *)va0, 0, sz);

	/*
	 * Initialize cpuinfo structure.
	 *
	 * Arrange pcb, idle stack and interrupt stack in the same
	 * way as is done for the boot CPU in pmap.c.
	 */
	cpi->ci_next = NULL;
	cpi->ci_curproc = NULL;
	cpi->ci_cpuid = ncpus++;
	cpi->ci_upaid = portid;
	cpi->ci_fpproc = NULL;
#ifdef MULTIPROCESSOR
	cpi->ci_spinup = cpu_hatch;				/* XXX */
#else
	cpi->ci_spinup = NULL;
#endif

	cpi->ci_initstack = cpi;
	cpi->ci_paddr = pa0;
#ifdef SUN4V
	cpi->ci_mmfsa = pa0;
#endif
	cpi->ci_self = cpi;
	cpi->ci_node = ma->ma_node;

	clockqueue_init(&cpi->ci_queue);
	sched_init_cpu(cpi);

	/*
	 * Finally, add the new cpu_info to the end of the list of
	 * active CPUs.
	 */
	for (ci = cpus; ci->ci_next != NULL; ci = ci->ci_next)
		;
	ci->ci_next = cpi;
	return (cpi);
}

int
cpu_match(struct device *parent, void *match, void *aux)
{
	struct mainbus_attach_args *ma = aux;
	char buf[32];
	int portid;

	if (OF_getprop(ma->ma_node, "device_type", buf, sizeof(buf)) <= 0 ||
	    strcmp(buf, "cpu") != 0)
		return (0);

	/*
	 * Make sure we don't match more than the maximum supported
	 * number of CPUs.  But do match the CPU we're running on.
	 */
	portid = getpropint(ma->ma_node, "upa-portid", -1);
	if (portid == -1)
		portid = getpropint(ma->ma_node, "portid", -1);
	if (portid == -1)
		portid = getpropint(ma->ma_node, "cpuid", -1);
	if (portid == -1 && ma->ma_nreg > 0)
		portid = (ma->ma_reg[0].ur_paddr >> 32) & 0x0fffffff;
	if (portid == -1)
		return (0);

	if (ncpus < MAXCPUS || portid == cpu_myid())
		return (1);

	return (0);
}

/*
 * Attach the CPU.
 * Discover interesting goop about the virtual address cache
 * (slightly funny place to do it, but this is where it is to be found).
 */
void
cpu_attach(struct device *parent, struct device *dev, void *aux)
{
	int node;
	u_int clk;
	int impl, vers;
	struct mainbus_attach_args *ma = aux;
	struct cpu_info *ci;
	const char *sep;
	register int i, l;
	u_int64_t ver = 0;
	extern u_int64_t cpu_clockrate[];

	if (CPU_ISSUN4U || CPU_ISSUN4US)
		ver = getver();
	impl = IU_IMPL(ver);
	vers = IU_VERS(ver);

	/* tell them what we have */
	if (strcmp(parent->dv_cfdata->cf_driver->cd_name, "core") == 0)
		node = OF_parent(ma->ma_node);
	else
		node = ma->ma_node;

	/*
	 * Allocate cpu_info structure if needed.
	 */
	ci = alloc_cpuinfo(ma);
	ci->ci_node = ma->ma_node;

	clk = getpropint(node, "clock-frequency", 0);
	if (clk == 0) {
		/*
		 * Try to find it in the OpenPROM root...
		 */
		clk = getpropint(findroot(), "clock-frequency", 0);
	}
	if (clk) {
		cpu_clockrate[0] = clk; /* Tell OS what frequency we run on */
		cpu_clockrate[1] = clk / 1000000;
	}
	snprintf(cpu_model, sizeof cpu_model, "%s (rev %d.%d) @ %s MHz",
	    ma->ma_name, vers >> 4, vers & 0xf, clockfreq(clk));
	printf(": %s\n", cpu_model);

	cpu_cpuspeed = sparc64_cpuspeed;

	if (ci->ci_upaid == cpu_myid())
		cpu_init(ci);

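	/* Probe the I$, D$ and E$ geometry; line sizes must be powers of two. */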
	l = getpropint(node, "icache-line-size", 0);
	if (l == 0)
		l = getpropint(node, "l1-icache-line-size", 0);
	cacheinfo.ic_linesize = l;
	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad icache line size %d", l);
	cacheinfo.ic_totalsize = getpropint(node, "icache-size", 0);
	if (cacheinfo.ic_totalsize == 0)
		cacheinfo.ic_totalsize = getpropint(node, "l1-icache-size", 0);
	if (cacheinfo.ic_totalsize == 0)
		cacheinfo.ic_totalsize = l *
		    getpropint(node, "icache-nlines", 64) *
		    getpropint(node, "icache-associativity", 1);

	l = getpropint(node, "dcache-line-size", 0);
	if (l == 0)
		l = getpropint(node, "l1-dcache-line-size", 0);
	cacheinfo.dc_linesize = l;
	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad dcache line size %d", l);
	cacheinfo.dc_totalsize = getpropint(node, "dcache-size", 0);
	if (cacheinfo.dc_totalsize == 0)
		cacheinfo.dc_totalsize = getpropint(node, "l1-dcache-size", 0);
	if (cacheinfo.dc_totalsize == 0)
		cacheinfo.dc_totalsize = l *
		    getpropint(node, "dcache-nlines", 128) *
		    getpropint(node, "dcache-associativity", 1);

	l = getpropint(node, "ecache-line-size", 0);
	if (l == 0)
		l = getpropint(node, "l2-cache-line-size", 0);
	cacheinfo.ec_linesize = l;
	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad ecache line size %d", l);
	cacheinfo.ec_totalsize = getpropint(node, "ecache-size", 0);
	if (cacheinfo.ec_totalsize == 0)
		cacheinfo.ec_totalsize = getpropint(node, "l2-cache-size", 0);
	if (cacheinfo.ec_totalsize == 0)
		cacheinfo.ec_totalsize = l *
		    getpropint(node, "ecache-nlines", 32768) *
		    getpropint(node, "ecache-associativity", 1);

	/*
	 * XXX - The following will have to do until
	 * we have per-cpu cache handling.
	 */
	if (cacheinfo.ic_totalsize + cacheinfo.dc_totalsize == 0)
		return;

	sep = " ";
	printf("%s: physical", dev->dv_xname);
	if (cacheinfo.ic_totalsize > 0) {
		printf("%s%ldK instruction (%ld b/l)", sep,
		    (long)cacheinfo.ic_totalsize / 1024,
		    (long)cacheinfo.ic_linesize);
		sep = ", ";
	}
	if (cacheinfo.dc_totalsize > 0) {
		printf("%s%ldK data (%ld b/l)", sep,
		    (long)cacheinfo.dc_totalsize / 1024,
		    (long)cacheinfo.dc_linesize);
		sep = ", ";
	}
	if (cacheinfo.ec_totalsize > 0) {
		printf("%s%ldK external (%ld b/l)", sep,
		    (long)cacheinfo.ec_totalsize / 1024,
		    (long)cacheinfo.ec_linesize);
	}

#ifndef SMALL_KERNEL
	if (impl == IMPL_HUMMINGBIRD)
		hummingbird_init(ci);
#endif

	printf("\n");
}

int
cpu_myid(void)
{
	char buf[32];
	int impl;

#ifdef SUN4V
	if (CPU_ISSUN4V) {
		uint64_t myid;

		hv_cpu_myid(&myid);
		return myid;
	}
#endif

	if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 &&
	    strcmp(buf, "SUNW,Ultra-Enterprise-10000") == 0)
		return lduwa(0x1fff40000d0UL, ASI_PHYS_NON_CACHED);

	impl = (getver() & VER_IMPL) >> VER_IMPL_SHIFT;
	switch (impl) {
	case IMPL_OLYMPUS_C:
	case IMPL_JUPITER:
		return CPU_JUPITERID;
	case IMPL_CHEETAH:
	case IMPL_CHEETAH_PLUS:
	case IMPL_JAGUAR:
	case IMPL_PANTHER:
		return CPU_FIREPLANEID;
	default:
		return CPU_UPAID;
	}
}

void
cpu_init(struct cpu_info *ci)
{
#ifdef SUN4V
	paddr_t pa = ci->ci_paddr;
	int err;
#endif

	if (CPU_ISSUN4U || CPU_ISSUN4US)
		return;

#ifdef SUN4V
#define MONDO_QUEUE_SIZE	32
#define QUEUE_ENTRY_SIZE	64

	pa += CPUINFO_VA - INTSTACK;
	pa += PAGE_SIZE;

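	/* Carve the hypervisor queues out of the page following the cpu_info. */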
	ci->ci_cpumq = pa;
	err = hv_cpu_qconf(CPU_MONDO_QUEUE, ci->ci_cpumq, MONDO_QUEUE_SIZE);
	if (err != H_EOK)
		panic("Unable to set cpu mondo queue: %d", err);
	pa += MONDO_QUEUE_SIZE * QUEUE_ENTRY_SIZE;

	ci->ci_devmq = pa;
	err = hv_cpu_qconf(DEVICE_MONDO_QUEUE, ci->ci_devmq, MONDO_QUEUE_SIZE);
	if (err != H_EOK)
		panic("Unable to set device mondo queue: %d", err);
	pa += MONDO_QUEUE_SIZE * QUEUE_ENTRY_SIZE;

	ci->ci_mondo = pa;
	pa += 64;

	ci->ci_cpuset = pa;
	pa += 64;
#endif
}

struct cfdriver cpu_cd = {
	NULL, "cpu", DV_DULL
};

int
sparc64_cpuspeed(int *freq)
{
	extern u_int64_t cpu_clockrate[];

	*freq = cpu_clockrate[1];
	return (0);
}

#ifndef SMALL_KERNEL

/*
 * Hummingbird (UltraSPARC-IIe) has a clock control unit that enables
 * Energy Star mode.  This only works in combination with unbuffered
 * DIMMs so it is not supported on all machines with UltraSPARC-IIe
 * CPUs.
 */

/* Memory_Control_0 (MC0) register. */
#define HB_MC0				0x1fe0000f010ULL
#define HB_MC0_SELF_REFRESH		0x00010000
#define HB_MC0_REFRESH_COUNT_MASK	0x00007f00
#define HB_MC0_REFRESH_COUNT_SHIFT	8
#define HB_MC0_REFRESH_COUNT(reg) \
    (((reg) & HB_MC0_REFRESH_COUNT_MASK) >> HB_MC0_REFRESH_COUNT_SHIFT)
#define HB_MC0_REFRESH_CLOCKS_PER_COUNT	64ULL
#define HB_MC0_REFRESH_INTERVAL		7800ULL
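/*
 * The refresh count is the number of 64 CPU clock periods between DRAM
 * refreshes and has to be rescaled with the clock divisor so that one
 * refresh still happens every HB_MC0_REFRESH_INTERVAL (apparently
 * nanoseconds, i.e. 7.8us) of wall time.  For example, a 500MHz part
 * at full speed needs 7800 * 500000000 / (64 * 1 * 10^9) = ~60 counts.
 */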

/* Energy Star register. */
#define HB_ESTAR			0x1fe0000f080ULL
#define HB_ESTAR_MODE_MASK		0x00000007
#define HB_ESTAR_MODE_DIV_1		0x00000000
#define HB_ESTAR_MODE_DIV_2		0x00000001
#define HB_ESTAR_MODE_DIV_4		0x00000003
#define HB_ESTAR_MODE_DIV_6		0x00000002
#define HB_ESTAR_MODE_DIV_8		0x00000004
#define HB_ESTAR_NUM_MODES		5
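/*
 * Note that the mode encoding is not monotonic in the divisor (1/6
 * speed encodes below 1/4 speed), so divisors and modes are always
 * translated through hummingbird_div() and hummingbird_estar_mode().
 */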

int hummingbird_divisors[HB_ESTAR_NUM_MODES];

int
hummingbird_div(uint64_t estar_mode)
{
	switch (estar_mode) {
	case HB_ESTAR_MODE_DIV_1:
		return 1;
	case HB_ESTAR_MODE_DIV_2:
		return 2;
	case HB_ESTAR_MODE_DIV_4:
		return 4;
	case HB_ESTAR_MODE_DIV_6:
		return 6;
	case HB_ESTAR_MODE_DIV_8:
		return 8;
	default:
		panic("bad E-Star mode");
	}
}

uint64_t
hummingbird_estar_mode(int div)
{
	switch (div) {
	case 1:
		return HB_ESTAR_MODE_DIV_1;
	case 2:
		return HB_ESTAR_MODE_DIV_2;
	case 4:
		return HB_ESTAR_MODE_DIV_4;
	case 6:
		return HB_ESTAR_MODE_DIV_6;
	case 8:
		return HB_ESTAR_MODE_DIV_8;
	default:
		panic("bad clock divisor");
	}
}

void
hummingbird_enable_self_refresh(void)
{
	uint64_t reg;

	reg = ldxa(HB_MC0, ASI_PHYS_NON_CACHED);
	reg |= HB_MC0_SELF_REFRESH;
	stxa(HB_MC0, ASI_PHYS_NON_CACHED, reg);
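	/* Read the register back, presumably to complete the write. */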
	reg = ldxa(HB_MC0, ASI_PHYS_NON_CACHED);
}

void
hummingbird_disable_self_refresh(void)
{
	uint64_t reg;

	reg = ldxa(HB_MC0, ASI_PHYS_NON_CACHED);
	reg &= ~HB_MC0_SELF_REFRESH;
	stxa(HB_MC0, ASI_PHYS_NON_CACHED, reg);
	reg = ldxa(HB_MC0, ASI_PHYS_NON_CACHED);
}

void
hummingbird_set_refresh_count(int div, int new_div)
{
	extern u_int64_t cpu_clockrate[];
	uint64_t count, new_count;
	uint64_t delta;
	uint64_t reg;

	reg = ldxa(HB_MC0, ASI_PHYS_NON_CACHED);
	count = HB_MC0_REFRESH_COUNT(reg);
	new_count = (HB_MC0_REFRESH_INTERVAL * cpu_clockrate[0]) /
	    (HB_MC0_REFRESH_CLOCKS_PER_COUNT * new_div * 1000000000);
	reg &= ~HB_MC0_REFRESH_COUNT_MASK;
	reg |= (new_count << HB_MC0_REFRESH_COUNT_SHIFT);
	stxa(HB_MC0, ASI_PHYS_NON_CACHED, reg);
	reg = ldxa(HB_MC0, ASI_PHYS_NON_CACHED);

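	/*
	 * When slowing down without DRAM self-refresh enabled, wait
	 * until both the old and the new refresh count have expired
	 * at the old (divided) clock rate before returning.
	 */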
	if (new_div > div && (reg & HB_MC0_SELF_REFRESH) == 0) {
		delta = HB_MC0_REFRESH_CLOCKS_PER_COUNT *
		    ((count + new_count) * 1000000UL * div) / cpu_clockrate[0];
		delay(delta + 1);
	}
}

void
hummingbird_setperf(int level)
{
	extern u_int64_t cpu_clockrate[];
	uint64_t estar_mode, new_estar_mode;
	uint64_t reg, s;
	int div, new_div, i;

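	/*
	 * Pick the largest supported divisor whose resulting speed,
	 * 100 / divisor percent, still covers the requested level.
	 */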
	new_estar_mode = HB_ESTAR_MODE_DIV_1;
	for (i = 0; i < HB_ESTAR_NUM_MODES && hummingbird_divisors[i]; i++) {
		if (level <= 100 / hummingbird_divisors[i])
			new_estar_mode =
			    hummingbird_estar_mode(hummingbird_divisors[i]);
	}

	reg = ldxa(HB_ESTAR, ASI_PHYS_NON_CACHED);
	estar_mode = reg & HB_ESTAR_MODE_MASK;
	if (estar_mode == new_estar_mode)
		return;

	reg &= ~HB_ESTAR_MODE_MASK;
	div = hummingbird_div(estar_mode);
	new_div = hummingbird_div(new_estar_mode);

	s = intr_disable();
	if (estar_mode == HB_ESTAR_MODE_DIV_1 &&
	    new_estar_mode == HB_ESTAR_MODE_DIV_2) {
		hummingbird_set_refresh_count(1, 2);
		stxa(HB_ESTAR, ASI_PHYS_NON_CACHED, reg | HB_ESTAR_MODE_DIV_2);
		delay(1);
		hummingbird_enable_self_refresh();
	} else if (estar_mode == HB_ESTAR_MODE_DIV_2 &&
	    new_estar_mode == HB_ESTAR_MODE_DIV_1) {
		hummingbird_disable_self_refresh();
		stxa(HB_ESTAR, ASI_PHYS_NON_CACHED, reg | HB_ESTAR_MODE_DIV_1);
		delay(1);
		hummingbird_set_refresh_count(2, 1);
	} else if (estar_mode == HB_ESTAR_MODE_DIV_1) {
		/*
		 * Transition to 1/2 speed first, then to
		 * lower speed.
		 */
		hummingbird_set_refresh_count(1, 2);
		stxa(HB_ESTAR, ASI_PHYS_NON_CACHED, reg | HB_ESTAR_MODE_DIV_2);
		delay(1);
		hummingbird_enable_self_refresh();

		hummingbird_set_refresh_count(2, new_div);
		stxa(HB_ESTAR, ASI_PHYS_NON_CACHED, reg | new_estar_mode);
		delay(1);
	} else if (new_estar_mode == HB_ESTAR_MODE_DIV_1) {
		/*
		 * Transition to 1/2 speed first, then to
		 * full speed.
		 */
		stxa(HB_ESTAR, ASI_PHYS_NON_CACHED, reg | HB_ESTAR_MODE_DIV_2);
		delay(1);
		hummingbird_set_refresh_count(div, 2);

		hummingbird_disable_self_refresh();
		stxa(HB_ESTAR, ASI_PHYS_NON_CACHED, reg | HB_ESTAR_MODE_DIV_1);
		delay(1);
		hummingbird_set_refresh_count(2, 1);
	} else if (div < new_div) {
		hummingbird_set_refresh_count(div, new_div);
		stxa(HB_ESTAR, ASI_PHYS_NON_CACHED, reg | new_estar_mode);
		delay(1);
	} else if (div > new_div) {
		stxa(HB_ESTAR, ASI_PHYS_NON_CACHED, reg | new_estar_mode);
		delay(1);
		hummingbird_set_refresh_count(div, new_div);
	}
	cpu_clockrate[1] = cpu_clockrate[0] / (new_div * 1000000);
	intr_restore(s);
}

void
hummingbird_init(struct cpu_info *ci)
{
	/*
	 * The "clock-divisors" property seems to indicate which
	 * frequency scalings are supported on a particular model.
	 */
	if (OF_getprop(ci->ci_node, "clock-divisors",
	    &hummingbird_divisors, sizeof(hummingbird_divisors)) <= 0)
		return;

	cpu_setperf = hummingbird_setperf;
}
#endif

#ifdef MULTIPROCESSOR
void cpu_mp_startup(void);

void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *ci;
	int cpuid, i;
	char buf[32];

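	/*
	 * On Starfire (Ultra-Enterprise-10000) the interrupt target ID
	 * is a hardware module ID that differs from the UPA port ID
	 * and has to be translated; everywhere else the two match.
	 */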
	if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 &&
	    strcmp(buf, "SUNW,Ultra-Enterprise-10000") == 0) {
		for (ci = cpus; ci != NULL; ci = ci->ci_next)
			ci->ci_itid = STARFIRE_UPAID2HWMID(ci->ci_upaid);
	} else {
		for (ci = cpus; ci != NULL; ci = ci->ci_next)
			ci->ci_itid = ci->ci_upaid;
	}

	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		if (ci->ci_upaid == cpu_myid())
			continue;
		ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;

		if (CPU_ISSUN4V)
			cpuid = ci->ci_upaid;
		else
			cpuid = getpropint(ci->ci_node, "cpuid", -1);

		if (OF_test("SUNW,start-cpu-by-cpuid") == 0) {
			prom_start_cpu_by_cpuid(cpuid,
			    (void *)cpu_mp_startup, ci->ci_paddr);
		} else {
			prom_start_cpu(ci->ci_node,
			    (void *)cpu_mp_startup, ci->ci_paddr);
		}

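		/* Give the CPU up to 20 seconds (2000 * 10ms) to come up. */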
		for (i = 0; i < 2000; i++) {
			membar_sync();
			if (ci->ci_flags & CPUF_RUNNING)
				break;
			delay(10000);
		}
	}
}

void
cpu_hatch(void)
{
	struct cpu_info *ci = curcpu();

	cpu_init(ci);

	ci->ci_flags |= CPUF_RUNNING;
	membar_sync();

	cpu_start_clock();

	sched_toidle();
}
#endif

void
need_resched(struct cpu_info *ci)
{
	ci->ci_want_resched = 1;

	/* There's a risk we'll be called before the idle threads start */
	if (ci->ci_curproc) {
		aston(ci->ci_curproc);
		if (ci != curcpu())
			cpu_unidle(ci);
	}
}

/*
 * Idle loop.
 *
 * We disable and re-enable interrupts on every cycle of the idle loop.
 * hv_cpu_yield() does not re-enable interrupts itself; it merely
 * returns when an interrupt would have been delivered, so it is our
 * responsibility to unblock them afterwards.
 */

void
cpu_idle_enter(void)
{
	if (CPU_ISSUN4V) {
		sparc_wrpr(pstate, sparc_rdpr(pstate) & ~PSTATE_IE, 0);
	}
}

void
cpu_idle_cycle(void)
{
#ifdef SUN4V
	if (CPU_ISSUN4V) {
		hv_cpu_yield();
		sparc_wrpr(pstate, sparc_rdpr(pstate) | PSTATE_IE, 0);
		sparc_wrpr(pstate, sparc_rdpr(pstate) & ~PSTATE_IE, 0);
	}
#endif

	/*
	 * On processors with multiple threads we simply force a
	 * thread switch.  Using the sleep instruction seems to work
	 * just as well as using the suspend instruction and makes the
	 * code a little bit less complicated.
	 */
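	/*
	 * The nop below is recorded in the .sun4u_mtp_patch section,
	 * which presumably lets the kernel patch it into the sleep
	 * instruction (opcode 0x81b01060) at boot on multithreaded
	 * sun4u CPUs.
	 */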
	__asm volatile(
		"999:	nop				\n"
		"	.section .sun4u_mtp_patch, \"ax\"	\n"
		"	.word	999b			\n"
		"	.word	0x81b01060 ! sleep	\n"
		"	.previous			\n"
		: : : "memory");
}

void
cpu_idle_leave(void)
{
	if (CPU_ISSUN4V) {
		sparc_wrpr(pstate, sparc_rdpr(pstate) | PSTATE_IE, 0);
	}
}