1 /*
2 * nvidia tegra 2 architecture-specific stuff
3 */
4
5 #include "u.h"
6 #include "../port/lib.h"
7 #include "mem.h"
8 #include "dat.h"
9 #include "fns.h"
10 #include "../port/error.h"
11 #include "io.h"
12 #include "arm.h"
13
14 #include "../port/netif.h"
15 #include "etherif.h"
16 #include "../port/flashif.h"
17 #include "../port/usb.h"
18 #include "../port/portusbehci.h"
19 #include "usbehci.h"
20
enum {
	/* hardware limits imposed by register contents or layouts */
	Maxcpus = 4,		/* cpu reset bits in Clkrst cpuset/cpuclr */
	Maxflowcpus = 2,	/* Flow has halt/ctl regs for only 2 cpus */

	Debug = 0,		/* non-zero enables l1diag & extra prints */
};
28
29 typedef struct Clkrst Clkrst;
30 typedef struct Diag Diag;
31 typedef struct Flow Flow;
32 typedef struct Scu Scu;
33 typedef struct Power Power;
34
35 struct Clkrst {
36 ulong rstsrc;
37 ulong rstdevl;
38 ulong rstdevh;
39 ulong rstdevu;
40
41 ulong clkoutl;
42 ulong clkouth;
43 ulong clkoutu;
44
45 uchar _pad0[0x24-0x1c];
46 ulong supcclkdiv; /* super cclk divider */
47 ulong _pad1;
48 ulong supsclkdiv; /* super sclk divider */
49
50 uchar _pad4[0x4c-0x30];
51 ulong clkcpu;
52
53 uchar _pad1[0xe0-0x50];
54 ulong pllxbase; /* pllx controls CPU clock speed */
55 ulong pllxmisc;
56 ulong pllebase; /* plle is dedicated to pcie */
57 ulong pllemisc;
58
59 uchar _pad2[0x340-0xf0];
60 ulong cpuset;
61 ulong cpuclr;
62 };
63
enum {
	/* rstsrc bits: watchdog reset sources & enable */
	Wdcpurst = 1<<0,
	Wdcoprst = 1<<1,
	Wdsysrst = 1<<2,
	Wdsel	= 1<<4,		/* tmr1 or tmr2? */
	Wdena	= 1<<5,

	/* devl (rstdevl) bits */
	Sysreset = 1<<2,	/* whole-system reset; see archreboot */

	/* clkcpu bits: stop the clock of the given cpu */
	Cpu1stop = 1<<9,
	Cpu0stop = 1<<8,

	/* cpu* (cpuset/cpuclr) bits: cpu0's bits, shifted left by cpu # */
	Cpu1dbgreset = 1<<13,
	Cpu0dbgreset = 1<<12,
	Cpu1wdreset = 1<<9,
	Cpu0wdreset = 1<<8,
	Cpu1dereset = 1<<5,
	Cpu0dereset = 1<<4,
	Cpu1reset = 1<<1,
	Cpu0reset = 1<<0,
};
89
/*
 * power-management controller registers (at soc.power).
 * layout must match the hardware register map; only gatests is
 * read in this file (see cpustart).
 */
struct Power {
	ulong	ctl;		/* mainly for rtc clock signals */
	ulong	secregdis;
	ulong	swrst;

	ulong	wakevmask;
	ulong	waklvl;
	ulong	waksts;
	ulong	swwaksts;

	ulong	dpdpadsovr;	/* deep power down pads override */
	ulong	dpdsample;
	ulong	dpden;

	ulong	gatetimroff;
	ulong	gatetimron;
	ulong	toggle;		/* partition power toggling (Start + id) */
	ulong	unclamp;
	ulong	gatests;	/* ro: which partitions have power */

	ulong	goodtmr;
	ulong	blinktmr;

	ulong	noiopwr;
	ulong	detect;
	ulong	detlatch;

	ulong	scratch[24];
	ulong	secscratch[6];

	ulong	cpupwrgoodtmr;
	ulong	cpupwrofftmr;

	ulong	pgmask[2];

	ulong	autowaklvl;
	ulong	autowaklvlmask;
	ulong	wakdelay;

	ulong	detval;
	ulong	ddr;
	ulong	usbdebdel;	/* usb de-bounce delay */
	ulong	usbao;
	ulong	cryptoop;
	ulong	pllpwb0ovr;
	ulong	scratch24[42-24+1];
	ulong	boundoutmirr[3];
	ulong	sys33ven;
	ulong	boundoutmirracc;
	ulong	gate;
};
141
enum {
	/* toggle bits */
	Start	= 1<<8,		/* initiate the partition power toggle */
	/* partition ids (low bits of toggle) */
	Partpcie= 3,
	Partl2	= 4,
};
149
/*
 * snoop control unit registers (at soc.scu): keeps the cpus'
 * l1 data caches coherent.  layout must match the hardware.
 */
struct Scu {
	ulong	ctl;		/* see Scuenable etc. below */
	ulong	cfg;		/* ro: cpu count & smp participation */
	ulong	cpupwrsts;
	ulong	inval;		/* invalidate tag ways, 4 bits per cpu */

	uchar	_pad0[0x40-0x10];
	ulong	filtstart;
	ulong	filtend;

	uchar	_pad1[0x50-0x48];
	ulong	accctl;		/* initially 0 */
	ulong	nsaccctl;
};
164
enum {
	/* ctl bits */
	Scuenable = 1<<0,
	Filter	= 1<<1,
	Scuparity = 1<<2,
	Specfill = 1<<3,	/* speculative line fill; only for PL310 */
	Allport0 = 1<<4,
	Standby	= 1<<5,
	Icstandby = 1<<6,
};
175
/*
 * flow controller registers (at soc.flow): per-cpu (and avp
 * coprocessor) halt & event control.  layout matches hardware.
 */
struct Flow {
	ulong	haltcpu0;
	ulong	haltcop;	/* the arm7 avp coprocessor */
	ulong	cpu0;
	ulong	cop;
	ulong	xrq;
	ulong	haltcpu1;
	ulong	cpu1;
};
185
enum {
	/* haltcpu* bits */
	Stop	= 2<<29,	/* halt the cpu/coprocessor */

	/* cpu* bits */
	Event	= 1<<14,	/* w1c */
	Waitwfebitsshift = 4,
	Waitwfebitsmask = MASK(2),
	Eventenable = 1<<1,
	Cpuenable = 1<<0,
};
197
/*
 * shared state for the l1 cache-coherence test (l1diag/pass1).
 * c0/c1 pad the counters away from neighbouring data so the test
 * exercises genuine cross-cpu cache-line traffic.
 */
struct Diag {
	Cacheline c0;
	Lock;			/* anonymous; ilock(dp) locks it */
	long	cnt;		/* should sum to 0 if caches are coherent */
	long	sync;		/* barrier counter for synccpus */
	Cacheline c1;
};
205
extern ulong testmem;

/*
 * number of cpus available. contrast with conf.nmach, which is number
 * of running cpus.
 */
int navailcpus;
/* flag set elsewhere when cpu0's l1 page table is safe to copy (see cpustart) */
Isolated l1ptstable;
214
/* physical device addresses of the tegra 2 soc (some mapped via P2VAHB) */
Soc soc = {
	.clkrst	= 0x60006000,		/* clock & reset signals */
	.power	= 0x7000e400,
	.exceptvec = PHYSEVP,		/* undocumented magic */
	.sema	= 0x60001000,
	.l2cache= PHYSL2BAG,		/* pl310 bag on the side */
	.flow	= 0x60007000,

	/* 4 non-gic controllers */
//	.intr	= { 0x60004000, 0x60004100, 0x60004200, 0x60004300, },

	/* private memory region */
	.scu	= 0x50040000,
	/* we got this address from the `cortex-a series programmer's guide'. */
	.intr	= 0x50040100,		/* per-cpu interface */
	.glbtmr	= 0x50040200,
	.loctmr	= 0x50040600,
	.intrdist=0x50041000,

	.uart	= { 0x70006000, 0x70006040,
		    0x70006200, 0x70006300, 0x70006400, },

	.rtc	= 0x7000e000,
	.tmr	= { 0x60005000, 0x60005008, 0x60005050, 0x60005058, },
	.µs	= 0x60005010,

	.pci	= 0x80000000,
	.ether	= 0xa0024000,

	.nand	= 0x70008000,
	.nor	= 0x70009000,		/* also VIRTNOR */

	.ehci	= P2VAHB(0xc5000000),	/* 1st of 3 */
	.ide	= P2VAHB(0xc3000000),

	.gpio	= { 0x6000d000, 0x6000d080, 0x6000d100, 0x6000d180,
		    0x6000d200, 0x6000d280, 0x6000d300, },
	.spi	= { 0x7000d400, 0x7000d600, 0x7000d800, 0x7000da00, },
	.twsi	= 0x7000c000,
	.mmc	= { P2VAHB(0xc8000000), P2VAHB(0xc8000200),
		    P2VAHB(0xc8000400), P2VAHB(0xc8000600), },
};
257
static volatile Diag diag;	/* shared by all cpus during l1diag */
static int missed;		/* count of unreachable devices (see missing) */
260
void
dumpcpuclks(void)		/* print the cpu-related pll & divider regs */
{
	Clkrst *clk = (Clkrst *)soc.clkrst;

	iprint("pllx base %#lux misc %#lux\n", clk->pllxbase, clk->pllxmisc);
	iprint("plle base %#lux misc %#lux\n", clk->pllebase, clk->pllemisc);
	iprint("super cclk divider %#lux\n", clk->supcclkdiv);
	iprint("super sclk divider %#lux\n", clk->supsclkdiv);
}
271
/* name the cpu; the device id argument is ignored on this soc */
static char *
devidstr(ulong)
{
	return "ARM Cortex-A9";
}
277
/* arch-specific link-time initialisation; nothing needed here */
void
archtegralink(void)
{
}
282
283 /* convert AddrDevid register to a string in buf and return buf */
284 char *
cputype2name(char * buf,int size)285 cputype2name(char *buf, int size)
286 {
287 ulong r;
288
289 r = cpidget(); /* main id register */
290 assert((r >> 24) == 'A');
291 seprint(buf, buf + size, "Cortex-A9 r%ldp%ld",
292 (r >> 20) & MASK(4), r & MASK(4));
293 return buf;
294 }
295
/*
 * enable workarounds for the cortex-a9 errata that apply to this
 * chip revision, via the undocumented `diagnostic register'.
 */
static void
errata(void)
{
	ulong reg, r, p;

	/* apply cortex-a9 errata workarounds */
	r = cpidget();			/* main id register */
	assert((r >> 24) == 'A');	/* implementer must be ARM */
	p = r & MASK(4);		/* minor revision */
	r >>= 20;
	r &= MASK(4);			/* major revision */

	/* this is an undocumented `diagnostic register' that linux knows */
	reg = cprdsc(0, CpDTLB, 0, 1);
	if (r < 2 || r == 2 && p <= 2)		/* r2p2 and earlier */
		reg |= 1<<4;			/* 742230 */
	if (r == 2 && p <= 2)
		reg |= 1<<6 | 1<<12 | 1<<22;	/* 743622, 2×742231 */
	if (r < 3)
		reg |= 1<<11;			/* 751472 */
	cpwrsc(0, CpDTLB, 0, 1, reg);
}
318
/*
 * establish the cpu clock rate in m->cpuhz (the *cpumhz config
 * variable may override the trimslice default) and apply cpu
 * errata workarounds.
 */
void
archconfinit(void)
{
	char *p;
	ulong hz;

	assert(m != nil);
	m->cpuhz = 1000 * Mhz;			/* trimslice speed */
	p = getconf("*cpumhz");
	if (p) {
		hz = atoi(p) * Mhz;
		/* sanity-check the configured rate before believing it */
		if (hz >= 100*Mhz && hz <= 3600UL*Mhz)
			m->cpuhz = hz;
	}
	m->delayloop = m->cpuhz/2000;		/* initial estimate */
	errata();
}
336
337 int
archether(unsigned ctlrno,Ether * ether)338 archether(unsigned ctlrno, Ether *ether)
339 {
340 switch(ctlrno) {
341 case 0:
342 ether->type = "rtl8169"; /* pci-e ether */
343 ether->ctlrno = ctlrno;
344 ether->irq = Pcieirq; /* non-msi pci-e intr */
345 ether->nopt = 0;
346 ether->mbps = 1000;
347 return 1;
348 }
349 return -1;
350 }
351
/* print the snoop control unit's configuration and power state */
void
dumpscustate(void)
{
	Scu *scu = (Scu *)soc.scu;

	print("cpu%d scu: accctl %#lux\n", m->machno, scu->accctl);
	print("cpu%d scu: smp cpu bit map %#lo for %ld cpus; ", m->machno,
		(scu->cfg >> 4) & MASK(4), (scu->cfg & MASK(2)) + 1);
	print("cpus' power %#lux\n", scu->cpupwrsts);
}
362
/*
 * turn on the snoop control unit (inter-cpu cache coherence),
 * if it isn't already on, after invalidating all its tag ways.
 */
void
scuon(void)
{
	Scu *scu = (Scu *)soc.scu;

	if (scu->ctl & Scuenable)
		return;
	scu->inval = MASK(16);		/* invalidate all ways of all 4 cpus */
	coherence();
	scu->ctl = Scuparity | Scuenable | Specfill;
	coherence();
}
375
376 int
getncpus(void)377 getncpus(void)
378 {
379 int n;
380 char *p;
381 Scu *scu;
382
383 if (navailcpus == 0) {
384 scu = (Scu *)soc.scu;
385 navailcpus = (scu->cfg & MASK(2)) + 1;
386 if (navailcpus > MAXMACH)
387 navailcpus = MAXMACH;
388
389 p = getconf("*ncpu");
390 if (p && *p) {
391 n = atoi(p);
392 if (n > 0 && n < navailcpus)
393 navailcpus = n;
394 }
395 }
396 return navailcpus;
397 }
398
/* print this cpu's number, speed, name and endianness */
void
cpuidprint(void)
{
	char name[64];

	cputype2name(name, sizeof name);
	delay(50);			/* let uart catch up */
	iprint("cpu%d: %lldMHz ARM %s %s-endian\n",
		m->machno, m->cpuhz / Mhz, name,
		getpsr() & PsrBigend? "big": "little");
}
410
/*
 * enable the clocks of all devices by taking them out of reset,
 * then arm the watchdog reset sources.
 */
static void
clockson(void)
{
	Clkrst *clk = (Clkrst *)soc.clkrst;

	/* enable all by clearing resets */
	clk->rstdevl = clk->rstdevh = clk->rstdevu = 0;
	coherence();
	clk->clkoutl = clk->clkouth = clk->clkoutu = ~0; /* enable all clocks */
	coherence();

	clk->rstsrc = Wdcpurst | Wdcoprst | Wdsysrst | Wdena;
	coherence();
}
425
/* we could be shutting down ourself (if cpu == m->machno), so take care. */
void
stopcpu(uint cpu)
{
	Flow *flow = (Flow *)soc.flow;
	Clkrst *clk = (Clkrst *)soc.clkrst;

	if (cpu == 0) {
		iprint("stopcpu: may not stop cpu0\n");
		return;
	}

	machoff(cpu);			/* tell the scheduler cpu is gone */
	lock(&active);
	active.stopped |= 1 << cpu;
	unlock(&active);
	l1cache->wb();			/* push our state to ram first */

	/* shut down arm7 avp coproc so it can't cause mischief. */
	/* could try watchdog without stopping avp. */
	flow->haltcop = Stop;
	coherence();
	flow->cop = 0;			/* no Cpuenable */
	coherence();
	delay(10);

	/* halt the cpu via the flow controller (cpu != 0 here) */
	assert(cpu < Maxflowcpus);
	*(cpu == 0? &flow->haltcpu0: &flow->haltcpu1) = Stop;
	coherence();
	*(cpu == 0? &flow->cpu0: &flow->cpu1) = 0; /* no Cpuenable */
	coherence();
	delay(10);

	/* cold reset */
	assert(cpu < Maxcpus);
	clk->cpuset = (Cpu0reset | Cpu0dbgreset | Cpu0dereset) << cpu;
	coherence();
	delay(1);

	l1cache->wb();
}
467
/*
 * spin barrier: atomically count this cpu in at *cntp, then busy-wait
 * until n cpus have arrived.  callers adec *cntp back down afterwards.
 */
static void
synccpus(volatile long *cntp, int n)
{
	ainc(cntp);
	while (*cntp < n)
		;
	/* all cpus should now be here */
}
476
/*
 * one pass of the coherence test: every cpu increments and decrements
 * the shared counter a million times; if the l1 caches are coherent,
 * the counter is exactly zero once all cpus have finished.
 */
static void
pass1(int pass, volatile Diag *dp)
{
	int i;

	if(m->machno == 0)
		iprint(" %d", pass);
	for (i = 1000*1000; --i > 0; ) {
		ainc(&dp->cnt);
		adec(&dp->cnt);
	}

	synccpus(&dp->sync, navailcpus);
	/* all cpus are now here */

	ilock(dp);
	if(dp->cnt != 0)
		panic("cpu%d: diag: failed w count %ld", m->machno, dp->cnt);
	iunlock(dp);

	/* second barrier, then undo this cpu's two barrier increments */
	synccpus(&dp->sync, 2 * navailcpus);
	/* all cpus are now here */
	adec(&dp->sync);
	adec(&dp->sync);
}
502
/*
 * try to confirm coherence of l1 caches.
 * assume that all available cpus will be started.
 * every running cpu executes this; cpu0 does the printing.
 */
void
l1diag(void)
{
	int pass;
	volatile Diag *dp;

	if (!Debug)
		return;

	l1cache->wb();

	/*
	 * synchronise and print
	 */
	dp = &diag;
	ilock(dp);			/* serialise the iprints */
	if (m->machno == 0)
		iprint("l1: waiting for %d cpus... ", navailcpus);
	iunlock(dp);

	synccpus(&dp->sync, navailcpus);

	ilock(dp);
	if (m->machno == 0)
		iprint("cache coherency pass");
	iunlock(dp);

	/* second barrier, then undo this cpu's two barrier increments */
	synccpus(&dp->sync, 2 * navailcpus);
	adec(&dp->sync);
	adec(&dp->sync);

	/*
	 * cpus contend
	 */
	for (pass = 0; pass < 3; pass++)
		pass1(pass, dp);

	/*
	 * synchronise and check sanity
	 */
	synccpus(&dp->sync, navailcpus);

	if(dp->sync < navailcpus || dp->sync >= 2 * navailcpus)
		panic("cpu%d: diag: failed w dp->sync %ld", m->machno,
			dp->sync);
	if(dp->cnt != 0)
		panic("cpu%d: diag: failed w dp->cnt %ld", m->machno,
			dp->cnt);

	ilock(dp);
	iprint(" cpu%d ok", m->machno);
	iunlock(dp);

	synccpus(&dp->sync, 2 * navailcpus);
	adec(&dp->sync);
	adec(&dp->sync);
	l1cache->wb();

	/*
	 * all done, print
	 */
	ilock(dp);
	if (m->machno == 0)
		iprint("\n");
	iunlock(dp);
}
573
/*
 * let cpu run: restart its clock, take it out of reset and
 * release its flow-controller halt.
 */
static void
unfreeze(uint cpu)
{
	Clkrst *clk = (Clkrst *)soc.clkrst;
	Flow *flow = (Flow *)soc.flow;

	assert(cpu < Maxcpus);

	clk->clkcpu &= ~(Cpu0stop << cpu);	/* restart cpu's clock */
	coherence();
	/* out of reset */
	clk->cpuclr = (Cpu0reset | Cpu0wdreset | Cpu0dbgreset | Cpu0dereset) <<
		cpu;
	coherence();

	assert(cpu < Maxflowcpus);
	*(cpu == 0? &flow->cpu0: &flow->cpu1) = 0;
	coherence();
	*(cpu == 0? &flow->haltcpu0: &flow->haltcpu1) = 0; /* normal operat'n */
	coherence();
}
595
/*
 * this is all a bit magic. the soc.exceptvec register is effectively
 * undocumented. we had to look at linux and experiment, alas. this is the
 * sort of thing that should be standardised as part of the cortex mpcore spec.
 * even intel document their equivalent procedure.
 *
 * start secondary cpu `cpu' executing at _vrst by planting _vrst's
 * physical address in soc.exceptvec and unfreezing the cpu, then wait
 * for it to announce itself by writing its cpu number back there
 * (see cpustart).  returns 0 on success, -1 on failure.
 */
int
startcpu(uint cpu)
{
	int i, r;
	ulong oldvec, rstaddr;
	ulong *evp = (ulong *)soc.exceptvec;	/* magic */

	r = 0;
	/* refuse to start ourself, or a cpu we don't have */
	if (getncpus() < 2 || cpu == m->machno ||
	    cpu >= MAXMACH || cpu >= navailcpus)
		return -1;

	oldvec = *evp;
	l1cache->wb();			/* start next cpu w same view of ram */
	*evp = rstaddr = PADDR(_vrst);	/* will start cpu executing at _vrst */
	coherence();
	l1cache->wb();
	unfreeze(cpu);

	/* wait up to 2s for the new cpu to overwrite the vector */
	for (i = 2000; i > 0 && *evp == rstaddr; i--)
		delay(1);
	if (i <= 0 || *evp != cpu) {
		iprint("cpu%d: didn't start!\n", cpu);
		stopcpu(cpu);		/* make sure it's stopped */
		r = -1;
	}
	*evp = oldvec;
	return r;
}
631
/*
 * verify that this cpu is running in the secure state; complain
 * if hardware debugging is enabled.
 */
static void
cksecure(void)
{
	ulong db;
	extern ulong getdebug(void);

	if (getscr() & 1)		/* NS bit of the secure config reg */
		panic("cpu%d: running non-secure", m->machno);
	db = getdebug();
	if (db)
		iprint("cpu%d: debug enable reg %#lux\n", m->machno, db);
}
644
/*
 * join this cpu to the coherence domain (smp mode + broadcast of
 * cache/tlb maintenance ops); returns the previous aux control reg.
 */
ulong
smpon(void)
{
	ulong aux;

	/* cortex-a9 model-specific configuration */
	aux = getauxctl();
	putauxctl(aux | CpACsmp | CpACmaintbcast);
	return aux;
}
655
/* enable cortex-a9 cache features: parity, line-zero write, l2 prefetch */
void
cortexa9cachecfg(void)
{
	/* cortex-a9 model-specific configuration */
	putauxctl(getauxctl() | CpACparity | CpAClwr0line | CpACl2pref);
}
662
/*
 * called on a cpu other than 0 from cpureset in l.s,
 * from _vrst in lexception.s.
 * mmu and l1 (and system-wide l2) caches and coherency (smpon) are on,
 * but interrupts are disabled.
 * our mmu is using an exact copy of cpu0's l1 page table
 * as it was after userinit ran.
 * does not return: ends in schedinit.
 */
void
cpustart(void)
{
	int ms;
	ulong *evp;
	Power *pwr;

	up = nil;
	/* a second trip through here means we reset after starting */
	if (active.machs & (1<<m->machno)) {
		serialputc('?');
		serialputc('r');
		panic("cpu%d: resetting after start", m->machno);
	}
	assert(m->machno != 0);

	errata();
	cortexa9cachecfg();
	memdiag(&testmem);

	machinit();		/* bumps nmach, adds bit to machs */
	machoff(m->machno);	/* not ready to go yet */

	/* clock signals and scu are system-wide and already on */
	clockshutdown();	/* kill any watch-dog timer */

	trapinit();
	clockinit();		/* sets loop delay */
	timersinit();
	cpuidprint();

	/*
	 * notify cpu0 that we're up so it can proceed to l1diag.
	 */
	evp = (ulong *)soc.exceptvec;	/* magic: see startcpu */
	*evp = m->machno;
	coherence();

	l1diag();		/* contend with other cpus to verify sanity */

	/*
	 * pwr->noiopwr == 0
	 * pwr->detect == 0x1ff (default, all disabled)
	 */
	pwr = (Power *)soc.power;
	assert(pwr->gatests == MASK(7));	/* everything has power */

	/*
	 * 8169 has to initialise before we get past this, thus cpu0
	 * has to schedule processes first.
	 */
	if (Debug)
		iprint("cpu%d: waiting for 8169\n", m->machno);
	/* poll l1ptstable.word (set by cpu0) for up to 5s */
	for (ms = 0; !l1ptstable.word && ms < 5000; ms += 10) {
		delay(10);
		cachedinvse(&l1ptstable.word, sizeof l1ptstable.word);
	}
	if (!l1ptstable.word)
		iprint("cpu%d: 8169 unreasonably slow; proceeding\n", m->machno);
	/* now safe to copy cpu0's l1 pt in mmuinit */

	mmuinit();		/* update our l1 pt from cpu0's */
	fpon();
	machon(m->machno);	/* now ready to go and be scheduled */

	if (Debug)
		iprint("cpu%d: scheding\n", m->machno);
	schedinit();
	panic("cpu%d: schedinit returned", m->machno);
}
740
/* software-generated interrupt handler; mainly used to break out of wfi */
void
sgintr(Ureg *ureg, void *)
{
	iprint("cpu%d: got sgi\n", m->machno);
	/* try to prod cpu1 into life when it gets stuck */
	if (m->machno != 0)
		clockprod(ureg);
}
750
/*
 * early one-time arch initialisation: caches, device clocks,
 * cpu speed, fpu, and the inter-cpu (sgi) interrupt handlers.
 * idempotent: subsequent calls are no-ops.
 */
void
archreset(void)
{
	static int beenhere;

	if (beenhere)
		return;
	beenhere = 1;

	/* conservative temporary values until archconfinit runs */
	m->cpuhz = 1000 * Mhz;			/* trimslice speed */
	m->delayloop = m->cpuhz/2000;		/* initial estimate */

	prcachecfg();

	clockson();
	/* all partitions were powered up by u-boot, so needn't do anything */
	archconfinit();
//	resetusb();
	fpon();

	if (irqtooearly)
		panic("archreset: too early for irqenable");
	irqenable(Cpu0irq, sgintr, nil, "cpu0");
	irqenable(Cpu1irq, sgintr, nil, "cpu1");
	/* ... */
}
778
/*
 * reset the whole system via the clock & reset controller.
 * only valid on cpu0; should not return.
 */
void
archreboot(void)
{
	Clkrst *clk = (Clkrst *)soc.clkrst;

	assert(m->machno == 0);
	iprint("archreboot: reset!\n");
	delay(20);			/* let the uart drain */

	clk->rstdevl |= Sysreset;	/* assert system reset */
	coherence();
	delay(500);

	/* shouldn't get here */
	splhi();
	iprint("awaiting reset");
	for(;;) {
		delay(1000);
		print(".");
	}
}
800
/* no keyboard controller on this board; nothing to initialise */
void
kbdinit(void)
{
}
805
806 static void
missing(ulong addr,char * name)807 missing(ulong addr, char *name)
808 {
809 static int firstmiss = 1;
810
811 if (addr == 0) {
812 iprint("address zero for %s\n", name);
813 return;
814 }
815 if (probeaddr(addr) >= 0)
816 return;
817 missed++;
818 if (firstmiss) {
819 iprint("missing:");
820 firstmiss = 0;
821 } else
822 iprint(",\n\t");
823 iprint(" %s at %#lux", name, addr);
824 }
825
/* verify that all the necessary device registers are accessible */
void
chkmissing(void)
{
	delay(10);
	missing(KZERO, "dram");
	missing(soc.intr, "intr ctlr");
	missing(soc.intrdist, "intr distrib");
	missing(soc.tmr[0], "tegra timer1");
	missing(soc.uart[0], "console uart");
	missing(soc.pci, "pcie");
	missing(soc.ether, "ether8169");
	missing(soc.µs, "µs counter");
	if (missed)		/* terminate missing()'s output */
		iprint("\n");
	delay(10);
}
843
/* set or clear flash write-protect; no such control on this board */
void
archflashwp(Flash*, int)
{
}
848
/*
 * for ../port/devflash.c:/^flashreset
 * retrieve flash type, virtual base and length and return 0;
 * return -1 on error (no flash)
 */
int
archflashreset(int bank, Flash *f)
{
	if(bank != 0)
		return -1;
	/* deliberate: code below is a stale template and must not run */
	panic("archflashreset: rewrite for nor & nand flash on ts");
	/*
	 * this is set up for the igepv2 board.
	 * NOTE(review): unreachable until the panic above is removed.
	 */
	f->type = "onenand";
	f->addr = (void*)VIRTNOR;	/* mapped here by archreset */
	f->size = 0;			/* done by probe */
	f->width = 1;
	f->interleave = 0;
	return 0;
}
870