/*
 * arm mpcore generic interrupt controller (gic) v1
 * traps, exceptions, interrupts, system calls.
 *
 * there are two pieces: the interrupt distributor and the cpu interface.
 *
 * memset or memmove on any of the distributor registers generates an
 * exception like this one:
 *	panic: external abort 0x28 pc 0xc048bf68 addr 0x50041800
 *
 * we use l1 and l2 cache ops to force vectors to be visible everywhere.
 *
 * apparently irqs 0-15 (SGIs) are always enabled.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "../port/error.h"

#include "ureg.h"
#include "arm.h"

#define ISSGI(irq)	((uint)(irq) < Nsgi)

enum {
	Debug = 0,

	Nvec = 8,		/* # of vectors at start of lexception.s */
	Bi2long = BI2BY * sizeof(long),
	Nirqs = 1024,
	Nsgi = 16,		/* software-generated (inter-processor) intrs */
	Nppi = 32,		/* sgis + other private peripheral intrs */
};
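
/*
 * gic interrupt ids: 0-15 are sgis and 16-31 are ppis, both banked
 * per-cpu; 32 up to 1019 are shared peripheral intrs routed by the
 * distributor; ids 1020-1023 are reserved (1023 is what the cpu
 * interface returns for a spurious interrupt).
 */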

typedef struct Intrcpuregs Intrcpuregs;
typedef struct Intrdistregs Intrdistregs;

/*
 * almost this entire register set is buggered.
 * the distributor is supposed to be per-system, not per-cpu,
 * yet some registers are banked per-cpu, as marked.
 */
struct Intrdistregs {			/* distributor */
	ulong	ctl;
	ulong	ctlrtype;
	ulong	distid;
	uchar	_pad0[0x80 - 0xc];

	/* botch: *[0] are banked per-cpu from here */
	/* bit maps */
	ulong	grp[32];		/* in group 1 (non-secure) */
	ulong	setena[32];		/* forward to cpu interfaces */
	ulong	clrena[32];
	ulong	setpend[32];
	ulong	clrpend[32];
	ulong	setact[32];		/* active? */
	ulong	clract[32];
	/* botch: *[0] are banked per-cpu until here */

	uchar	pri[1020];	/* botch: pri[0] - pri[7] are banked per-cpu */
	ulong	_rsrvd1;
	/* botch: targ[0] through targ[7] are banked per-cpu and RO */
	uchar	targ[1020];	/* byte bit maps: cpu targets indexed by intr */
	ulong	_rsrvd2;
	/* botch: cfg[1] is banked per-cpu */
	ulong	cfg[64];		/* bit pairs: edge? 1-N? */
	ulong	_pad1[64];
	ulong	nsac[64];		/* bit pairs (v2 only) */

	/* software-generated intrs (a.k.a. sgi) */
	ulong	swgen;			/* intr targets */
	uchar	_pad2[0xf10 - 0xf04];
	uchar	clrsgipend[16];		/* bit map (v2 only) */
	uchar	setsgipend[16];		/* bit map (v2 only) */
};
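
/*
 * the grp, setena, clrena, setpend, clrpend, setact and clract arrays
 * are bit maps with one bit per interrupt: irq n lives at word
 * n/Bi2long, bit n%Bi2long.  pri and targ hold one byte per interrupt,
 * indexed directly by irq number.
 */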

enum {
	/* ctl bits */
	Forw2cpuif = 1,

	/* ctlrtype bits */
	Cpunoshft = 5,
	Cpunomask = MASK(3),
	Intrlines = MASK(5),

	/* cfg bits */
	Level = 0<<1,
	Edge = 1<<1,		/* edge-, not level-sensitive */
	Toall = 0<<0,
	To1 = 1<<0,		/* vs. to all */

	/* swgen bits */
	Totargets = 0,
	Tonotme = 1<<24,
	Tome = 2<<24,
};

/* each cpu sees its own registers at the same base address (soc.intr) */
struct Intrcpuregs {
	ulong	ctl;
	ulong	primask;

	ulong	binpt;			/* group pri vs subpri split */
	ulong	ack;
	ulong	end;
	ulong	runpri;
	ulong	hipripend;

	/* aliased regs (secure, for group 1) */
	ulong	alibinpt;
	ulong	aliack;			/* (v2 only) */
	ulong	aliend;			/* (v2 only) */
	ulong	alihipripend;		/* (v2 only) */

	uchar	_pad0[0xd0 - 0x2c];
	ulong	actpri[4];		/* (v2 only) */
	ulong	nsactpri[4];		/* (v2 only) */

	uchar	_pad1[0xfc - 0xf0];
	ulong	ifid;			/* ro */

	uchar	_pad2[0x1000 - 0x100];
	ulong	deact;			/* wo (v2 only) */
};

enum {
	/* ctl bits */
	Enable = 1,
	Eoinodeact = 1<<9,		/* (v2 only) */

	/* (ali) ack/end/hipriend/deact bits */
	Intrmask = MASK(10),
	Cpuidshift = 10,
	Cpuidmask = MASK(3),

	/* ifid bits */
	Archversshift = 16,
	Archversmask = MASK(4),
};

typedef struct Vctl Vctl;
typedef struct Vctl {
	Vctl*	next;			/* handlers on this vector */
	char	*name;			/* of driver, malloced */
	void	(*f)(Ureg*, void*);	/* handler to call */
	void*	a;			/* argument to call it with */
} Vctl;

static Lock vctllock;
static Vctl* vctl[Nirqs];
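
/*
 * vctl[] is shared by all cpus and guarded by vctllock.  handlers are
 * called from irq() at splhi and must not lower the priority level.
 */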

/*
 * Layout at virtual address 0.
 */
typedef struct Vpage0 {
	void	(*vectors[Nvec])(void);
	u32int	vtable[Nvec];
} Vpage0;

enum
{
	Ntimevec = 20		/* number of time buckets for each intr */
};
ulong intrtimes[Nirqs][Ntimevec];

uvlong ninterrupt;
uvlong ninterruptticks;
int irqtooearly = 1;

static ulong shadena[32];	/* copy of enable bits, saved by intcmaskall */
static Lock distlock, nintrlock;

extern int notify(Ureg*);

static void dumpstackwithureg(Ureg *ureg);

void
printrs(int base, ulong word)
{
	int bit;

	for (bit = 0; word; bit++, word >>= 1)
		if (word & 1)
			iprint(" %d", base + bit);
}

void
dumpintrs(char *what, ulong *bits)
{
	int i, first, some;
	ulong word;
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;

	first = 1;
	some = 0;
	USED(idp);
	for (i = 0; i < nelem(idp->setpend); i++) {
		word = bits[i];
		if (word) {
			if (first) {
				first = 0;
				iprint("%s", what);
			}
			some = 1;
			printrs(i * Bi2long, word);
		}
	}
	if (!some)
		iprint("%s none", what);
	iprint("\n");
}

void
dumpintrpend(void)
{
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;

	iprint("\ncpu%d gic regs:\n", m->machno);
	dumpintrs("group 1", idp->grp);
	dumpintrs("enabled", idp->setena);
	dumpintrs("pending", idp->setpend);
	dumpintrs("active ", idp->setact);
}

/*
 * keep histogram of interrupt service times
 */
void
intrtime(Mach*, int vno)
{
	ulong diff;
	ulong x;

	x = perfticks();
	diff = x - m->perf.intrts;
	m->perf.intrts = x;

	m->perf.inintr += diff;
	if(up == nil && m->perf.inidle > diff)
		m->perf.inidle -= diff;

	if (m->cpumhz == 0)
		return;			/* don't divide by zero */
	diff /= m->cpumhz*100;		/* quantum = 100µsec */
	if(diff >= Ntimevec)
		diff = Ntimevec-1;
	if ((uint)vno >= Nirqs)
		vno = Nirqs-1;
	intrtimes[vno][diff]++;
}

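/*
 * reading the cpu interface's ack register acknowledges the
 * highest-priority pending interrupt and returns its id in the low 10
 * bits (for an sgi, bits 12-10 name the cpu that requested it).
 * writing the acknowledged value back to the end register signals
 * end-of-interrupt; irq() does that via intdismiss once the handlers
 * have run.
 */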
static ulong
intack(Intrcpuregs *icp)
{
	return icp->ack & Intrmask;
}

static void
intdismiss(Intrcpuregs *icp, ulong ack)
{
	icp->end = ack;
	coherence();
}

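/*
 * the distributor's enable state is changed a bit at a time: writing a
 * 1 bit to setena enables that irq and writing a 1 bit to clrena
 * disables it; 0 bits are ignored, so no read-modify-write of the
 * whole word is needed (distlock is still taken for ordering).
 */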
static int
irqinuse(uint irq)
{
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;

	return idp->setena[irq / Bi2long] & (1 << (irq % Bi2long));
}

void
intcunmask(uint irq)
{
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;

	ilock(&distlock);
	idp->setena[irq / Bi2long] = 1 << (irq % Bi2long);
	iunlock(&distlock);
}

void
intcmask(uint irq)
{
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;

	ilock(&distlock);
	idp->clrena[irq / Bi2long] = 1 << (irq % Bi2long);
	iunlock(&distlock);
}

static void
intcmaskall(Intrdistregs *idp)		/* mask all intrs for all cpus */
{
	int i;

	for (i = 0; i < nelem(idp->setena); i++)
		shadena[i] = idp->setena[i];
	for (i = 0; i < nelem(idp->clrena); i++)
		idp->clrena[i] = ~0;
	coherence();
}

static void
intcunmaskall(Intrdistregs *idp)	/* unused */
{
	int i;

	for (i = 0; i < nelem(idp->setena); i++)
		idp->setena[i] = shadena[i];
	coherence();
}

static ulong
permintrs(Intrdistregs *idp, int base, int r)
{
	ulong perms;

	idp->clrena[r] = ~0;		/* disable all */
	coherence();
	perms = idp->clrena[r];
	if (perms) {
		iprint("perm intrs:");
		printrs(base, perms);
		iprint("\n");
	}
	return perms;
}

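/*
 * distributor configuration.  cpu0 sets up the shared (non-banked)
 * state: group, priority, trigger type and cpu routing for all
 * interrupts, then masks and clears everything.  the other cpus only
 * touch the registers banked per-cpu, which cover the first Nppi
 * (private) interrupts.
 */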
static void
intrcfg(Intrdistregs *idp)
{
	int i, cpumask;
	ulong pat;

	/* set up all interrupts as level-sensitive, to one cpu (0) */
	pat = 0;
	for (i = 0; i < Bi2long; i += 2)
		pat |= (Level | To1) << i;

	if (m->machno == 0) {			/* system-wide & cpu0 cfg */
		for (i = 0; i < nelem(idp->grp); i++)
			idp->grp[i] = 0;	/* secure */
		for (i = 0; i < nelem(idp->pri); i++)
			idp->pri[i] = 0;	/* highest priority */
		/* set up all interrupts as level-sensitive, to one cpu (0) */
		for (i = 0; i < nelem(idp->cfg); i++)
			idp->cfg[i] = pat;
		/* first Nppi are read-only for SGIs and PPIs */
		cpumask = 1<<0;			/* just cpu 0 */
		navailcpus = getncpus();
		for (i = Nppi; i < sizeof idp->targ; i++)
			idp->targ[i] = cpumask;
		coherence();

		intcmaskall(idp);
		for (i = 0; i < nelem(idp->clrena); i++) {
			// permintrs(idp, i * Bi2long, i);
			idp->clrpend[i] = idp->clract[i] = idp->clrena[i] = ~0;
		}
	} else {				/* per-cpu config */
		idp->grp[0] = 0;		/* secure */
		for (i = 0; i < 8; i++)
			idp->pri[i] = 0;	/* highest priority */
		/* idp->targ[0 through Nppi-1] are supposed to be read-only */
		for (i = 0; i < Nppi; i++)
			idp->targ[i] = 1<<m->machno;
		idp->cfg[1] = pat;
		coherence();

		// permintrs(idp, i * Bi2long, i);
		idp->clrpend[0] = idp->clract[0] = idp->clrena[0] = ~0;
		/* on cpu1, irq Extpmuirq (118) is always pending here */
	}
	coherence();
}

void
intrto(int cpu, int irq)
{
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;

	/* first Nppi are read-only for SGIs and the like */
	ilock(&distlock);
	idp->targ[irq] = 1 << cpu;
	iunlock(&distlock);
}

void
intrsto(int cpu)			/* unused */
{
	int i;
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;

	/* first Nppi are read-only for SGIs and the like */
	for (i = Nppi; i < sizeof idp->targ; i++)
		intrto(cpu, i);
	USED(idp);
}

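/*
 * send a software-generated interrupt (sgi) to the given cpu.  in the
 * swgen register, bits 25-24 select the destination filter, bits 23-16
 * are the target cpu bit map and the low bits are the sgi number; the
 * sgi used here is the sending cpu's number.
 */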
void
intrcpu(int cpu)
{
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;

	ilock(&distlock);
	idp->swgen = Totargets | 1 << (cpu + 16) | m->machno;
	iunlock(&distlock);
}

/*
 * set up for exceptions
 */
void
trapinit(void)
{
	int s;
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;
	Intrcpuregs *icp = (Intrcpuregs *)soc.intr;
	Vpage0 *vpage0;
	enum { Vecsize = sizeof vpage0->vectors + sizeof vpage0->vtable, };

	/*
	 * set up the exception vectors, high and low.
	 *
	 * we can't use cache ops on HVECTORS address, since they
	 * work on virtual addresses, and only those that have a
	 * physical address == PADDR(virtual).
	 */
	if (m->machno == 0) {
		vpage0 = (Vpage0*)HVECTORS;
		memmove(vpage0->vectors, vectors, sizeof(vpage0->vectors));
		memmove(vpage0->vtable, vtable, sizeof(vpage0->vtable));

		vpage0 = (Vpage0*)KADDR(0);
		memmove(vpage0->vectors, vectors, sizeof(vpage0->vectors));
		memmove(vpage0->vtable, vtable, sizeof(vpage0->vtable));

		allcache->wbse(vpage0, Vecsize);
		cacheiinv();
	}

	/*
	 * set up the stack pointers for the exception modes for this cpu.
	 * they point to small `save areas' in Mach, not actual stacks.
	 */
	s = splhi();			/* make these modes ignore intrs too */
	setr13(PsrMfiq, m->sfiq);
	setr13(PsrMirq, m->sirq);
	setr13(PsrMmon, m->smon);
	setr13(PsrMabt, m->sabt);
	setr13(PsrMund, m->sund);
	setr13(PsrMsys, m->ssys);
	splx(s);

	assert((idp->distid & MASK(12)) == 0x43b);	/* made by arm */
	assert((icp->ifid & MASK(12)) == 0x43b);	/* made by arm */

	ilock(&distlock);
	idp->ctl = 0;
	icp->ctl = 0;
	coherence();

	intrcfg(idp);			/* some per-cpu cfg here */

	icp->ctl = Enable;
	icp->primask = (uchar)~0;	/* let all priorities through */
	coherence();

	idp->ctl = Forw2cpuif;
	iunlock(&distlock);
}

void
intrsoff(void)
{
	ilock(&distlock);
	intcmaskall((Intrdistregs *)soc.intrdist);
	iunlock(&distlock);
}

void
intrcpushutdown(void)
{
	Intrcpuregs *icp = (Intrcpuregs *)soc.intr;

	icp->ctl = 0;
	icp->primask = 0;	/* let no priorities through */
	coherence();
}

/* called from cpu0 after other cpus are shutdown */
void
intrshutdown(void)
{
	Intrdistregs *idp = (Intrdistregs *)soc.intrdist;

	intrsoff();
	idp->ctl = 0;
	intrcpushutdown();
}

/*
 * enable an irq interrupt
 * note that the same private interrupt may be enabled on multiple cpus
 */
int
irqenable(uint irq, void (*f)(Ureg*, void*), void* a, char *name)
{
	Vctl *v;

	if(irq >= nelem(vctl))
		panic("irqenable irq %d", irq);

	if (irqtooearly) {
		iprint("irqenable for %d %s called too early\n", irq, name);
		return -1;
	}
	/*
	 * if in use, could be a private interrupt on a secondary cpu,
	 * so don't add anything to the vector chain.  irqs should
	 * otherwise be one-to-one with devices.
	 */
	if(!ISSGI(irq) && irqinuse(irq)) {
		lock(&vctllock);
		if (vctl[irq] == nil) {
			dumpintrpend();
			panic("non-sgi irq %d in use yet no Vctl allocated", irq);
		}
		unlock(&vctllock);
	}
	/* could be 1st use of this irq or could be an sgi (always in use) */
	else if (vctl[irq] == nil) {
		v = malloc(sizeof(Vctl));
		if (v == nil)
			panic("irqenable: malloc Vctl");
		v->f = f;
		v->a = a;
		v->name = malloc(strlen(name)+1);
		if (v->name == nil)
			panic("irqenable: malloc name");
		strcpy(v->name, name);

		lock(&vctllock);
		if (vctl[irq] != nil) {
			/* allocation race: someone else did it first */
			free(v->name);
			free(v);
		} else {
			v->next = vctl[irq];
			vctl[irq] = v;
		}
		unlock(&vctllock);
	}
	intcunmask(irq);
	return 0;
}

/*
 * disable an irq interrupt
 */
int
irqdisable(uint irq, void (*f)(Ureg*, void*), void* a, char *name)
{
	Vctl **vp, *v;

	if(irq >= nelem(vctl))
		panic("irqdisable irq %d", irq);

	lock(&vctllock);
	for(vp = &vctl[irq]; v = *vp; vp = &v->next)
		if (v->f == f && v->a == a && strcmp(v->name, name) == 0){
			if(Debug)
				print("irqdisable: remove %s\n", name);
			*vp = v->next;
			free(v->name);
			free(v);
			break;
		}

	if(v == nil)
		print("irqdisable: irq %d, name %s not enabled\n", irq, name);
	if(vctl[irq] == nil){
		if(Debug)
			print("irqdisable: clear icmr bit %d\n", irq);
		intcmask(irq);
	}
	unlock(&vctllock);

	return 0;
}

/*
 * called by trap to handle access faults
 */
static void
faultarm(Ureg *ureg, uintptr va, int user, int read)
{
	int n, insyscall;

	if(up == nil) {
		dumpstackwithureg(ureg);
		panic("faultarm: cpu%d: nil up, %sing %#p at %#p",
			m->machno, (read? "read": "writ"), va, ureg->pc);
	}
	insyscall = up->insyscall;
	up->insyscall = 1;

	n = fault(va, read);		/* goes spllo */
	splhi();
	if(n < 0){
		char buf[ERRMAX];

		if(!user){
			dumpstackwithureg(ureg);
			panic("fault: cpu%d: kernel %sing %#p at %#p",
				m->machno, read? "read": "writ", va, ureg->pc);
		}
		/* don't dump registers; programs suicide all the time */
		snprint(buf, sizeof buf, "sys: trap: fault %s va=%#p",
			read? "read": "write", va);
		postnote(up, 1, buf, NDebug);
	}
	up->insyscall = insyscall;
}

/*
 * called by trap to handle interrupts.
 * returns true iff a clock interrupt, thus maybe reschedule.
 */
static int
irq(Ureg* ureg)
{
	int clockintr, ack;
	uint irqno, handled, t, ticks;
	Intrcpuregs *icp = (Intrcpuregs *)soc.intr;
	Vctl *v;

	ticks = perfticks();
	handled = 0;
	ack = intack(icp);
	irqno = ack & Intrmask;

	if (irqno >= nelem(vctl)) {
		iprint("trap: irq %d >= # vectors (%d)\n", irqno, nelem(vctl));
		intdismiss(icp, ack);
		return 0;
	}

	if (irqno == Loctmrirq)			/* this is a clock intr? */
		m->inclockintr++;		/* yes, count nesting */
	if(m->machno && m->inclockintr > 1) {
		iprint("cpu%d: nested clock intrs\n", m->machno);
		m->inclockintr--;
		intdismiss(icp, ack);
		return 0;
	}

	for(v = vctl[irqno]; v != nil; v = v->next)
		if (v->f) {
			if (islo())
				panic("trap: pl0 before trap handler for %s",
					v->name);
			v->f(ureg, v->a);
			if (islo())
				panic("trap: %s lowered pl", v->name);
//			splhi();		/* in case v->f lowered pl */
			handled++;
		}
	if(!handled)
		if (irqno >= 1022)
			iprint("cpu%d: ignoring spurious interrupt\n", m->machno);
		else {
			intcmask(irqno);
			iprint("cpu%d: unexpected interrupt %d, now masked\n",
				m->machno, irqno);
		}
	t = perfticks();
	if (0) {			/* left over from another port? */
		ilock(&nintrlock);
		ninterrupt++;
		if(t < ticks)
			ninterruptticks += ticks-t;
		else
			ninterruptticks += t-ticks;
		iunlock(&nintrlock);
	}
	USED(t, ticks);
	clockintr = m->inclockintr == 1;
	if (irqno == Loctmrirq)
		m->inclockintr--;

	intdismiss(icp, ack);
	intrtime(m, irqno);
	return clockintr;
}

/*
 * returns 1 if the instruction writes memory, 0 otherwise
 */
int
writetomem(ulong inst)
{
	/* swap instructions always write memory */
	if((inst & 0x0FC00000) == 0x01000000)
		return 1;

	/* loads and stores are distinguished by bit 20 */
	if(inst & (1<<20))
		return 0;

	return 1;
}

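/*
 * handle a data abort: read the fault address and status registers,
 * reassemble the fault status code and dispatch on it.  probeaddr()
 * sets m->probing so that an expected kernel fault just skips the
 * faulting load instead of panicking.
 */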
static void
datafault(Ureg *ureg, int user)
{
	int x;
	ulong inst, fsr;
	uintptr va;

	va = farget();

	if (m->probing && !user) {
		if (m->trapped++ > 0) {
			dumpstackwithureg(ureg);
			panic("trap: recursive probe %#lux", va);
		}
		ureg->pc += 4;		/* continue after faulting instr'n */
		return;
	}

	inst = *(ulong*)(ureg->pc);
	/* bits 12 and 10 have to be concatenated with status */
	x = fsrget();
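	/* fsr bits 3:0 stay put, bit 10 becomes bit 4 and bit 12 becomes bit 5 */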
	fsr = (x>>7) & 0x20 | (x>>6) & 0x10 | x & 0xf;
	switch(fsr){
	default:
	case 0xa:		/* ? was under external abort */
		panic("unknown data fault, 6b fsr %#lux", fsr);
		break;
	case 0x0:
		panic("vector exception at %#lux", ureg->pc);
		break;
	case 0x1:		/* alignment fault */
	case 0x3:		/* access flag fault (section) */
		if(user){
			char buf[ERRMAX];

			snprint(buf, sizeof buf,
				"sys: alignment: pc %#lux va %#p\n",
				ureg->pc, va);
			postnote(up, 1, buf, NDebug);
		} else {
			dumpstackwithureg(ureg);
			panic("kernel alignment: pc %#lux va %#p", ureg->pc, va);
		}
		break;
	case 0x2:
		panic("terminal exception at %#lux", ureg->pc);
		break;
	case 0x4:		/* icache maint fault */
	case 0x6:		/* access flag fault (page) */
	case 0x8:		/* precise external abort, non-xlat'n */
	case 0x28:
	case 0x16:		/* imprecise ext. abort, non-xlt'n */
	case 0x36:
		panic("external non-translation abort %#lux pc %#lux addr %#p",
			fsr, ureg->pc, va);
		break;
	case 0xc:		/* l1 translation, precise ext. abort */
	case 0x2c:
	case 0xe:		/* l2 translation, precise ext. abort */
	case 0x2e:
		panic("external translation abort %#lux pc %#lux addr %#p",
			fsr, ureg->pc, va);
		break;
	case 0x1c:		/* l1 translation, precise parity err */
	case 0x1e:		/* l2 translation, precise parity err */
	case 0x18:		/* imprecise parity or ecc err */
		panic("translation parity error %#lux pc %#lux addr %#p",
			fsr, ureg->pc, va);
		break;
	case 0x5:		/* translation fault, no section entry */
	case 0x7:		/* translation fault, no page entry */
		faultarm(ureg, va, user, !writetomem(inst));
		break;
	case 0x9:
	case 0xb:
		/* domain fault, accessing something we shouldn't */
		if(user){
			char buf[ERRMAX];

			snprint(buf, sizeof buf,
				"sys: access violation: pc %#lux va %#p\n",
				ureg->pc, va);
			postnote(up, 1, buf, NDebug);
		} else
			panic("kernel access violation: pc %#lux va %#p",
				ureg->pc, va);
		break;
	}
}

/*
 * here on all exceptions other than syscall (SWI) and reset
 */
void
trap(Ureg *ureg)
{
	int clockintr, user, rem;
	uintptr va, ifar, ifsr;

	splhi();			/* paranoia */
	if(up != nil)
		rem = ((char*)ureg)-up->kstack;
	else
		rem = ((char*)ureg)-((char*)m+sizeof(Mach));
	if(rem < 1024) {
		iprint("trap: %d stack bytes left, up %#p ureg %#p m %#p cpu%d at pc %#lux\n",
			rem, up, ureg, m, m->machno, ureg->pc);
		dumpstackwithureg(ureg);
		panic("trap: %d stack bytes left, up %#p ureg %#p at pc %#lux",
			rem, up, ureg, ureg->pc);
	}

	m->perf.intrts = perfticks();
	user = (ureg->psr & PsrMask) == PsrMusr;
	if(user){
		up->dbgreg = ureg;
		cycles(&up->kentry);
	}

	/*
	 * All interrupts/exceptions should be resumed at ureg->pc-4,
	 * except for Data Abort which resumes at ureg->pc-8.
	 */
	if(ureg->type == (PsrMabt+1))
		ureg->pc -= 8;
	else
		ureg->pc -= 4;

	clockintr = 0;		/* if set, may call sched() before return */
	switch(ureg->type){
	default:
		panic("unknown trap; type %#lux, psr mode %#lux", ureg->type,
			ureg->psr & PsrMask);
		break;
	case PsrMirq:
		m->intr++;
		clockintr = irq(ureg);
		if(0 && up && !clockintr)
			preempted();	/* this causes spurious suicides */
		break;
	case PsrMabt:			/* prefetch (instruction) fault */
		va = ureg->pc;
		ifsr = cprdsc(0, CpFSR, 0, CpIFSR);
		ifsr = (ifsr>>7) & 0x8 | ifsr & 0x7;
		switch(ifsr){
		case 0x02:		/* instruction debug event (BKPT) */
			if(user)
				postnote(up, 1, "sys: breakpoint", NDebug);
			else{
				iprint("kernel bkpt: pc %#lux inst %#ux\n",
					va, *(u32int*)va);
				panic("kernel bkpt");
			}
			break;
		default:
			ifar = cprdsc(0, CpFAR, 0, CpIFAR);
			if (va != ifar)
				iprint("trap: cpu%d: i-fault va %#p != ifar %#p\n",
					m->machno, va, ifar);
			faultarm(ureg, va, user, 1);
			break;
		}
		break;
	case PsrMabt+1:			/* data fault */
		datafault(ureg, user);
		break;
	case PsrMund:			/* undefined instruction */
		if(!user) {
			if (ureg->pc & 3) {
				iprint("rounding fault pc %#lux down to word\n",
					ureg->pc);
				ureg->pc &= ~3;
			}
			if (Debug)
				iprint("mathemu: cpu%d fpon %d instr %#8.8lux at %#p\n",
					m->machno, m->fpon, *(ulong *)ureg->pc,
					ureg->pc);
			dumpstackwithureg(ureg);
			panic("cpu%d: undefined instruction: pc %#lux inst %#ux",
				m->machno, ureg->pc, ((u32int*)ureg->pc)[0]);
		} else if(seg(up, ureg->pc, 0) != nil &&
		   *(u32int*)ureg->pc == 0xD1200070)
			postnote(up, 1, "sys: breakpoint", NDebug);
		else if(fpuemu(ureg) == 0){	/* didn't find any FP instrs? */
			char buf[ERRMAX];

			snprint(buf, sizeof buf,
				"undefined instruction: pc %#lux instr %#8.8lux\n",
				ureg->pc, *(ulong *)ureg->pc);
			postnote(up, 1, buf, NDebug);
		}
		break;
	}
	splhi();

	/* delaysched set because we held a lock or because our quantum ended */
	if(up && up->delaysched && clockintr){
		sched();		/* can cause more traps */
		splhi();
	}

	if(user){
		if(up->procctl || up->nnote)
			notify(ureg);
		kexit(ureg);
	}
}

/*
 * Fill in enough of Ureg to get a stack trace, and call a function.
 * Used by debugging interface rdb.
 */
void
callwithureg(void (*fn)(Ureg*))
{
	Ureg ureg;

	memset(&ureg, 0, sizeof ureg);
	ureg.pc = getcallerpc(&fn);
	ureg.sp = PTR2UINT(&fn);
	fn(&ureg);
}

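/*
 * print the registers and then a stack trace: scan from the current
 * stack position to the top of the kernel (or Mach) stack, printing
 * anything that looks like a kernel text address.
 */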
static void
dumpstackwithureg(Ureg *ureg)
{
	int x;
	uintptr l, v, i, estack;
	char *s;

	dumpregs(ureg);
	if((s = getconf("*nodumpstack")) != nil && strcmp(s, "0") != 0){
		iprint("dumpstack disabled\n");
		return;
	}
	delay(1000);
	iprint("dumpstack\n");

	x = 0;
	x += iprint("ktrace /kernel/path %#.8lux %#.8lux %#.8lux # pc, sp, link\n",
		ureg->pc, ureg->sp, ureg->r14);
	delay(20);
	i = 0;
	if(up
	&& (uintptr)&l >= (uintptr)up->kstack
	&& (uintptr)&l <= (uintptr)up->kstack+KSTACK)
		estack = (uintptr)up->kstack+KSTACK;
	else if((uintptr)&l >= (uintptr)m->stack
	&& (uintptr)&l <= (uintptr)m+MACHSIZE)
		estack = (uintptr)m+MACHSIZE;
	else
		return;
	x += iprint("estackx %p\n", estack);

	for(l = (uintptr)&l; l < estack; l += sizeof(uintptr)){
		v = *(uintptr*)l;
		if((KTZERO < v && v < (uintptr)etext) || estack-l < 32){
			x += iprint("%.8p ", v);
			delay(20);
			i++;
		}
		if(i == 8){
			i = 0;
			x += iprint("\n");
			delay(20);
		}
	}
	if(i)
		iprint("\n");
	delay(3000);
}

void
dumpstack(void)
{
	callwithureg(dumpstackwithureg);
}

/*
 * dump system control coprocessor registers
 */
static void
dumpscr(void)
{
	iprint("0:\t%#8.8ux id\n", cpidget());
	iprint("\t%#8.8ux ct\n", cpctget());
	iprint("1:\t%#8.8ux control\n", controlget());
	iprint("2:\t%#8.8ux ttb\n", ttbget());
	iprint("3:\t%#8.8ux dac\n", dacget());
	iprint("4:\t(reserved)\n");
	iprint("5:\t%#8.8ux fsr\n", fsrget());
	iprint("6:\t%#8.8ux far\n", farget());
	iprint("7:\twrite-only cache\n");
	iprint("8:\twrite-only tlb\n");
	iprint("13:\t%#8.8ux pid\n", pidget());
	delay(10);
}

/*
 * dump general registers
 */
static void
dumpgpr(Ureg* ureg)
{
	if(up != nil)
		iprint("cpu%d: registers for %s %lud\n",
			m->machno, up->text, up->pid);
	else
		iprint("cpu%d: registers for kernel\n", m->machno);

	delay(20);
	iprint("%#8.8lux\tr0\n", ureg->r0);
	iprint("%#8.8lux\tr1\n", ureg->r1);
	iprint("%#8.8lux\tr2\n", ureg->r2);
	delay(20);
	iprint("%#8.8lux\tr3\n", ureg->r3);
	iprint("%#8.8lux\tr4\n", ureg->r4);
	iprint("%#8.8lux\tr5\n", ureg->r5);
	delay(20);
	iprint("%#8.8lux\tr6\n", ureg->r6);
	iprint("%#8.8lux\tr7\n", ureg->r7);
	iprint("%#8.8lux\tr8\n", ureg->r8);
	delay(20);
	iprint("%#8.8lux\tr9 (up)\n", ureg->r9);
	iprint("%#8.8lux\tr10 (m)\n", ureg->r10);
	iprint("%#8.8lux\tr11 (loader temporary)\n", ureg->r11);
	iprint("%#8.8lux\tr12 (SB)\n", ureg->r12);
	delay(20);
	iprint("%#8.8lux\tr13 (sp)\n", ureg->r13);
	iprint("%#8.8lux\tr14 (link)\n", ureg->r14);
	iprint("%#8.8lux\tr15 (pc)\n", ureg->pc);
	delay(20);
	iprint("%10.10lud\ttype\n", ureg->type);
	iprint("%#8.8lux\tpsr\n", ureg->psr);
	delay(500);
}

void
dumpregs(Ureg* ureg)
{
	dumpgpr(ureg);
	dumpscr();
}

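/*
 * read the word at addr, which may not be backed by anything.  a
 * resulting data abort is caught in datafault() via m->probing and
 * m->trapped; return the value read, or -1 if the access faulted.
 */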
vlong
probeaddr(uintptr addr)
{
	vlong v;

	ilock(&m->probelock);
	m->trapped = 0;
	m->probing = 1;
	coherence();

	v = *(ulong *)addr;	/* this may cause a fault */
	coherence();

	m->probing = 0;
	if (m->trapped)
		v = -1;
	iunlock(&m->probelock);
	return v;
}
