xref: /plan9-contrib/sys/src/9/loongson/faultmips.c (revision a81c3ea0c7f009a3088ab7fe55ea9013d9d77a74)
1 #include	"u.h"
2 #include	"../port/lib.h"
3 #include	"mem.h"
4 #include	"dat.h"
5 #include	"fns.h"
6 #include	"ureg.h"
7 #include	"../port/error.h"
8 #include	"io.h"
9 
enum {
	Debug = 0,		/* set non-zero to enable consecutive-fault diagnostics */
};

typedef struct Fault Fault;
/* record of one page fault; used to spot repeated identical faults */
struct Fault {
	uintptr	va;		/* faulting virtual address */
	ulong	pid;		/* pid of the faulting process */
	uintptr	pc;		/* pc at the time of the fault */
	int	cnt;		/* count of consecutive identical faults */
	char	*prog;		/* program name (kstrdup'ed in ckfaultstuck) */
	int	code;		/* exception code (index into excname) */
};

extern char *excname[];

/* last fault seen, and the most-repeated fault seen so far */
static Fault lflt, maxflt;

#define	OFR(r)		(uintptr)offsetof(Ureg, r)	/* offset into Ureg of r */
#define	REG(ur, r)	*acpureg(ur, r)			/* cpu reg in Ureg */
#define REGMASK		MASK(5)				/* 32 general registers */

/* stand-in storage for R0, which has no Ureg slot and always reads 0 */
static ulong dummyr0;

/* Ureg offset of each cpu register; entry 0 (R0) deliberately has no slot */
static	int	roff[32] = {
	0,       OFR(r1), OFR(r2), OFR(r3),
	OFR(r4), OFR(r5), OFR(r6), OFR(r7),
	OFR(r8), OFR(r9), OFR(r10), OFR(r11),
	OFR(r12), OFR(r13), OFR(r14), OFR(r15),
	OFR(r16), OFR(r17), OFR(r18), OFR(r19),
	OFR(r20), OFR(r21), OFR(r22), OFR(r23),
	OFR(r24), OFR(r25), OFR(r26), OFR(r27),
	OFR(r28), OFR(sp),  OFR(r30), OFR(r31),
};
44 
45 static ulong *
acpureg(Ureg * ur,int r)46 acpureg(Ureg *ur, int r)
47 {
48 	r &= REGMASK;
49 	if (r == 0 || roff[r] == 0) {
50 		dummyr0 = 0;
51 		return &dummyr0;
52 	}
53 	return (ulong *)((char*)ur + roff[r]);
54 }
55 
56 ulong *
reg(Ureg * ur,int r)57 reg(Ureg *ur, int r)
58 {
59 	return &REG(ur, r);
60 }
61 
/*
 * Ask if the instruction at EPC could have caused this badvaddr.
 * Returns 0 if it plausibly could (it's a load or store whose effective
 * address matches ur->badvaddr, or the instruction can't be examined);
 * returns 1 if badvaddr looks spurious for this instruction.
 */
int
tstbadvaddr(Ureg *ur)
{
	int rn;
	ulong iw, off, ea;

	iw = ur->pc;
	if(ur->cause & BD)	/* faulting instruction is in a branch delay slot */
		iw += 4;

	if(seg(up, iw, 0) == 0)	/* pc itself unmapped: can't fetch instruction */
		return 0;

	iw = *(ulong*)iw;	/* fetch the faulting instruction word */

/*	print("iw: %#lux\n", iw);	/**/

	/* only loads and stores can legitimately set badvaddr */
	switch((iw>>26) & 0x3f) {	/* major opcode field */
	default:
		return 1;
	case 0x20:	/* LB */
	case 0x24:	/* LBU */
			/* LD */
	case 0x35:
	case 0x36:
	case 0x37:	/* LDCz */
	case 0x1A:	/* LDL */
	case 0x1B:	/* LDR */
	case 0x21:	/* LH */
	case 0x25:	/* LHU */
	case 0x30:	/* LL */
	case 0x34:	/* LLD */
	case 0x23:	/* LW */
	case 0x31:
	case 0x32:	/* LWCz possible 0x33 */
	case 0x27:	/* LWU */
	case 0x22:	/* LWL */
	case 0x26:	/* LWR */
		break;

	case 0x28:	/* SB */
	case 0x38:	/* SC */
	case 0x3C:	/* SCD */
	case 0x3D:
	case 0x3E:
	case 0x3F:	/* SDCz */
	case 0x2C:	/* SDL */
	case 0x2D:	/* SDR */
	case 0x29:	/* SH */
	case 0x2B:	/* SW */
	case 0x39:
	case 0x3A:	/* SWCz */
	case 0x2A:	/* SWL */
	case 0x2E:	/* SWR */
		break;
	}

	/* sign-extend the 16-bit displacement */
	off = iw & 0xffff;
	if(off & 0x8000)
		off |= ~0xffff;

	/* effective address = base register (bits 25:21) + displacement */
	rn = (iw>>21) & 0x1f;
	ea = *reg(ur, rn);
	if(rn == 0)	/* R0 always reads as zero */
		ea = 0;
	ea += off;

	/* print("ea %#lux %#lux(R%d) bv %#lux pc %#lux\n", ea, off, rn, ur->badvaddr, ur->pc); /**/

	if(ur->badvaddr == ea)
		return 0;

	return 1;
}
139 
/*
 * we think we get consecutive page faults from unlucky combinations of
 * scheduling and stlb hashes, and they only happen with 16K pages.
 * however, we also get page faults while servicing the exact same fault.
 * more than 5 consecutive faults is unusual, now that we have a better
 * hash function.
 *
 * this can be helpful during mmu and cache debugging.
 */
/*
 * Note a fault at (va, pc); panic if the very same fault has repeated
 * ~1000 times.  Diagnostic only: always returns 0.
 */
static int
ckfaultstuck(Ureg *ur, int read, int code)
{
	uintptr pc, va;

	va = ur->badvaddr;
	pc = ur->pc;
	if (va != lflt.va || up->pid != lflt.pid || pc != lflt.pc ||
	    code != lflt.code) {
		/* at least one address or cause is different from last time */
		lflt.cnt = 1;
		lflt.va = va;
		lflt.pid = up->pid;
		lflt.pc = pc;
		lflt.code = code;
		return 0;
	}
	++lflt.cnt;
	if (lflt.cnt >= 1000)	/* fixfault() isn't fixing underlying cause? */
		panic("fault: %d consecutive faults for va %#p", lflt.cnt, va);
	if (lflt.cnt > maxflt.cnt) {
		/* record the worst streak seen, reported by faultsprint() */
		maxflt.cnt = lflt.cnt;
		maxflt.va = va;
		maxflt.pid = up->pid;
		maxflt.pc = pc;
		kstrdup(&maxflt.prog, up->text);
	}

	/* we're servicing that fault now! */
	/* adjust the threshold and program name to suit */
	if (lflt.cnt < 5 || strncmp(up->text, "8l", 2) != 0)
		return 0;
	iprint("%d consecutive faults for va %#p at pc %#p in %s "
		"pid %ld\n", lflt.cnt, lflt.va, pc, up->text, lflt.pid);
	iprint("\t%s: %s%s r31 %#lux tlbvirt %#lux\n",
		excname[code], va == pc? "[instruction] ": "",
		(read? "read": "write"), ur->r31, tlbvirt());
	return 0;
}
188 
189 char *
faultsprint(char * p,char * ep)190 faultsprint(char *p, char *ep)
191 {
192 	if (Debug)
193 		p = seprint(p, ep,
194 			"max consecutive faults %d for va %#p in %s\n",
195 			maxflt.cnt, maxflt.va, maxflt.prog);
196 	return p;
197 }
198 
/*
 *  find out fault address and type of access.
 *  Call common fault handler.
 */
void
faultmips(Ureg *ur, int user, int code)
{
	int read;
	ulong addr;
	char *p, buf[ERRMAX];
	static int infault, printed;

	/* disabled ("0 &&") recursive-fault diagnostic; flip to 1 to enable */
	if (0 && infault && !printed) {
		printed = 1;
		print("fault: recursive fault (%d deep) pc %#p va %#p\n",
			infault+1, ur->pc, ur->badvaddr);
	}
	infault++;
	if(waserror()){
		infault--;
		nexterror();
	}

	addr = ur->badvaddr;
	addr &= ~(BY2PG-1);	/* round down to the page boundary */

	/* TLB-modified and TLB-store exceptions are writes; others reads */
	read = !(code==CTLBM || code==CTLBS);

/*	print("fault: %s code %d va %#p pc %#p r31 %#lux tlbvirt %#lux\n",
		up->text, code, ur->badvaddr, ur->pc, ur->r31, tlbvirt());/**/

	/* parses as (Debug && ckfaultstuck(...)) || fault(...) == 0 */
	if (Debug && ckfaultstuck(ur, read, code) || fault(addr, read) == 0){
		infault--;
		poperror();
		return;
	}

	infault--;
	poperror();

	/* fault() failed; check that badvaddr even matches the instruction */
	if(tstbadvaddr(ur)) {
		print("fault: spurious badvaddr %#lux in %s at pc %#lux\n",
			ur->badvaddr, up->text, ur->pc);/**/
		return;
	}

	if(user) {
		/* user-mode fault: post a note instead of panicking */
		p = "store";
		if(read)
			p = "load";
		snprint(buf, sizeof buf, "sys: trap: fault %s addr=%#lux r31=%#lux",
			p, ur->badvaddr, ur->r31);
		postnote(up, 1, buf, NDebug);
		return;
	}

	/* unrecoverable kernel-mode fault */
	print("kernel %s vaddr=%#lux\n", excname[code], ur->badvaddr);
	print("st=%#lux pc=%#lux r31=%#lux sp=%#lux\n",
		ur->status, ur->pc, ur->r31, ur->sp);
	dumpregs(ur);
	panic("fault");
}
261 
262 /*
263  * called in syscallfmt.c, sysfile.c, sysproc.c
264  */
265 void
validalign(uintptr addr,unsigned align)266 validalign(uintptr addr, unsigned align)
267 {
268 	/*
269 	 * Plan 9 is a 32-bit O/S, and the hardware it runs on
270 	 * does not usually have instructions which move 64-bit
271 	 * quantities directly, synthesizing the operations
272 	 * with 32-bit move instructions. Therefore, the compiler
273 	 * (and hardware) usually only enforce 32-bit alignment,
274 	 * if at all.
275 	 *
276 	 * Take this out if the architecture warrants it.
277 	 */
278 	if(align == sizeof(vlong))
279 		align = sizeof(long);
280 
281 	/*
282 	 * Check align is a power of 2, then addr alignment.
283 	 */
284 	if((align != 0 && !(align & (align-1))) && !(addr & (align-1)))
285 		return;
286 	postnote(up, 1, "sys: odd address", NDebug);
287 	error(Ebadarg);
288 	/*NOTREACHED*/
289 }
290