/*	$NetBSD: cpu.c,v 1.31 2011/06/18 08:21:20 matt Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
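
/*
 * CPU attachment for IBM 4xx-family processors.  The cpu device attaches
 * to the processor local bus (plb), identifies the core from the
 * Processor Version Register, reports its clock frequency, and records
 * the instruction/data cache geometry used by the cache flush routines
 * at the end of this file.
 */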

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.31 2011/06/18 08:21:20 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <prop/proplib.h>

#include <powerpc/ibm4xx/cpu.h>
#include <powerpc/ibm4xx/dev/plbvar.h>

struct cputab {
	u_int version;
	u_int mask;
	const char *name;
};
static const struct cputab models[] = {
	{ PVR_401A1, 0xffff0000, "401A1" },
	{ PVR_401B2, 0xffff0000, "401B21" },
	{ PVR_401C2, 0xffff0000, "401C2" },
	{ PVR_401D2, 0xffff0000, "401D2" },
	{ PVR_401E2, 0xffff0000, "401E2" },
	{ PVR_401F2, 0xffff0000, "401F2" },
	{ PVR_401G2, 0xffff0000, "401G2" },
	{ PVR_403, 0xffff0000, "403" },
	{ PVR_405GP, 0xffff0000, "405GP" },
	{ PVR_405GPR, 0xffff0000, "405GPr" },
	{ PVR_405D5X1, 0xfffff000, "Xilinx Virtex II Pro" },
	{ PVR_405D5X2, 0xfffff000, "Xilinx Virtex 4 FX" },
	{ PVR_405EX, 0xffff0000, "405EX" },
	{ 0, 0, NULL }
};

static int cpumatch(device_t, cfdata_t, void *);
static void cpuattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cpu, 0,
    cpumatch, cpuattach, NULL, NULL);

int ncpus;

struct cpu_info cpu_info[1] = {
	{
		/* XXX add more ci_ev_* as we teach 4xx about them */
		.ci_ev_clock = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
		    NULL, "cpu0", "clock"),
		.ci_ev_statclock = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
		    NULL, "cpu0", "stat clock"),
		.ci_ev_softclock = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
		    NULL, "cpu0", "soft clock"),
		.ci_ev_softnet = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
		    NULL, "cpu0", "soft net"),
		.ci_ev_softserial = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
		    NULL, "cpu0", "soft serial"),
		.ci_curlwp = &lwp0,
	}
};

char cpu_model[80];

int cpufound = 0;
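
/*
 * Autoconfiguration glue.  cpumatch() accepts a single "cpu" device on
 * the plb (cpufound keeps a second instance from attaching); cpuattach()
 * reads the processor frequency from board_properties, matches the PVR
 * against the models table above, and fills in the cache information via
 * cpu_probe_cache().
 */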

static int
cpumatch(device_t parent, cfdata_t cf, void *aux)
{
	struct plb_attach_args *paa = aux;

	/* make sure that we're looking for a CPU */
	if (strcmp(paa->plb_name, cf->cf_name) != 0)
		return (0);

	return !cpufound;
}

static void
cpuattach(device_t parent, device_t self, void *aux)
{
	const struct cputab *cp = models;
	u_int pvr;
	u_int processor_freq;
	prop_number_t freq;

	freq = prop_dictionary_get(board_properties, "processor-frequency");
	KASSERT(freq != NULL);
	processor_freq = (unsigned int) prop_number_integer_value(freq);

	cpufound++;
	ncpus++;

	pvr = mfpvr();
	while (cp->name) {
		if ((pvr & cp->mask) == cp->version)
			break;
		cp++;
	}
	if (cp->name)
		strcpy(cpu_model, cp->name);
	else
		sprintf(cpu_model, "Version 0x%x", pvr);

	printf(": %dMHz %s (PVR 0x%x)\n", processor_freq / 1000 / 1000,
	    cp->name ? cp->name : "unknown model", pvr);

	cpu_probe_cache();

	/* We would crash later on anyway so just make the reason obvious */
	if (curcpu()->ci_ci.icache_size == 0 &&
	    curcpu()->ci_ci.dcache_size == 0)
		panic("%s could not detect cache size", device_xname(self));

	printf("%s: Instruction cache size %d line size %d\n",
	    device_xname(self),
	    curcpu()->ci_ci.icache_size, curcpu()->ci_ci.icache_line_size);
	printf("%s: Data cache size %d line size %d\n",
	    device_xname(self),
	    curcpu()->ci_ci.dcache_size, curcpu()->ci_ci.dcache_line_size);
}

/*
 * This routine must be explicitly called to initialize the
 * CPU cache information so cache flushes and memcpy operations
 * work.
 */
void
cpu_probe_cache(void)
{
	const struct cputab *cp = models;
	u_int pvr;

	pvr = mfpvr();
	while (cp->name) {
		if ((pvr & cp->mask) == cp->version)
			break;
		cp++;
	}

	/*
	 * First we need to identify the CPU and determine the
	 * cache line size, or things like memset/memcpy may lose
	 * badly.
	 */
	switch (cp->version) {
	case PVR_401A1:
		curcpu()->ci_ci.dcache_size = 1024;
		curcpu()->ci_ci.dcache_line_size = 16;
		curcpu()->ci_ci.icache_size = 2848;
		curcpu()->ci_ci.icache_line_size = 16;
		break;
	case PVR_401B2:
		curcpu()->ci_ci.dcache_size = 8192;
		curcpu()->ci_ci.dcache_line_size = 16;
		curcpu()->ci_ci.icache_size = 16384;
		curcpu()->ci_ci.icache_line_size = 16;
		break;
	case PVR_401C2:
		curcpu()->ci_ci.dcache_size = 8192;
		curcpu()->ci_ci.dcache_line_size = 16;
		curcpu()->ci_ci.icache_size = 0;
		curcpu()->ci_ci.icache_line_size = 16;
		break;
	case PVR_401D2:
		curcpu()->ci_ci.dcache_size = 2848;
		curcpu()->ci_ci.dcache_line_size = 16;
		curcpu()->ci_ci.icache_size = 4096;
		curcpu()->ci_ci.icache_line_size = 16;
		break;
	case PVR_401E2:
		curcpu()->ci_ci.dcache_size = 0;
		curcpu()->ci_ci.dcache_line_size = 16;
		curcpu()->ci_ci.icache_size = 0;
		curcpu()->ci_ci.icache_line_size = 16;
		break;
	case PVR_401F2:
		curcpu()->ci_ci.dcache_size = 2048;
		curcpu()->ci_ci.dcache_line_size = 16;
		curcpu()->ci_ci.icache_size = 2848;
		curcpu()->ci_ci.icache_line_size = 16;
		break;
	case PVR_401G2:
		curcpu()->ci_ci.dcache_size = 2848;
		curcpu()->ci_ci.dcache_line_size = 16;
		curcpu()->ci_ci.icache_size = 8192;
		curcpu()->ci_ci.icache_line_size = 16;
		break;
	case PVR_403:
		curcpu()->ci_ci.dcache_size = 8192;
		curcpu()->ci_ci.dcache_line_size = 16;
		curcpu()->ci_ci.icache_size = 16384;
		curcpu()->ci_ci.icache_line_size = 16;
		break;
	case PVR_405GP:
		curcpu()->ci_ci.dcache_size = 8192;
		curcpu()->ci_ci.dcache_line_size = 32;
		curcpu()->ci_ci.icache_size = 8192;
		curcpu()->ci_ci.icache_line_size = 32;
		break;
	case PVR_405GPR:
	case PVR_405D5X1:
	case PVR_405D5X2:
	case PVR_405EX:
		curcpu()->ci_ci.dcache_size = 16384;
		curcpu()->ci_ci.dcache_line_size = 32;
		curcpu()->ci_ci.icache_size = 16384;
		curcpu()->ci_ci.icache_line_size = 32;
		break;
	default:
		/*
		 * Unknown CPU type.  For safety we'll specify a
		 * cache with a 4-byte line size.  That way cache
		 * flush routines won't miss any lines.
		 */
		curcpu()->ci_ci.dcache_line_size = 4;
		curcpu()->ci_ci.icache_line_size = 4;
		break;
	}
}
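
/*
 * The flush routines below walk a region one cache line at a time.
 * "dcbf" writes back and invalidates one data cache line and "icbi"
 * invalidates one instruction cache line; both use register+register
 * addressing, with va as the base and the loop index as the offset.
 * The trailing sync/isync orders the flushes before any further
 * instructions execute.
 */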

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
dcache_flush_page(vaddr_t va)
{
	int i;

	if (curcpu()->ci_ci.dcache_line_size)
		for (i = 0; i < PAGE_SIZE;
		    i += curcpu()->ci_ci.dcache_line_size)
			__asm volatile("dcbf %0,%1" : : "r" (va), "r" (i));
	__asm volatile("sync;isync" : : );
}

void
icache_flush_page(vaddr_t va)
{
	int i;

	if (curcpu()->ci_ci.icache_line_size)
		for (i = 0; i < PAGE_SIZE;
		    i += curcpu()->ci_ci.icache_line_size)
			__asm volatile("icbi %0,%1" : : "r" (va), "r" (i));
	__asm volatile("sync;isync" : : );
}

void
dcache_flush(vaddr_t va, vsize_t len)
{
	int i;

	if (len == 0)
		return;

	/* Make sure we flush all cache lines */
	len += va & (curcpu()->ci_ci.dcache_line_size-1);
	if (curcpu()->ci_ci.dcache_line_size)
		for (i = 0; i < len; i += curcpu()->ci_ci.dcache_line_size)
			__asm volatile("dcbf %0,%1" : : "r" (va), "r" (i));
	__asm volatile("sync;isync" : : );
}

void
icache_flush(vaddr_t va, vsize_t len)
{
	int i;

	if (len == 0)
		return;

	/* Make sure we flush all cache lines */
	len += va & (curcpu()->ci_ci.icache_line_size-1);
	if (curcpu()->ci_ci.icache_line_size)
		for (i = 0; i < len; i += curcpu()->ci_ci.icache_line_size)
			__asm volatile("icbi %0,%1" : : "r" (va), "r" (i));
	__asm volatile("sync;isync" : : );
}
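
/*
 * Typical use when instructions have been written into memory
 * (hypothetical example, not part of this file): flush the modified
 * range out of the data cache, then invalidate the corresponding
 * instruction cache lines so the CPU fetches the new code:
 *
 *	dcache_flush(va, len);
 *	icache_flush(va, len);
 */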