/*
 * /plan9-contrib/sys/src/9/port/fault.c (revision 7ed32af043a69f7cfb1383e0889169e3ced10c8c)
 */
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"../port/error.h"

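/*
 *  fault is called by the machine-dependent trap code when the
 *  current process faults on addr; read is non-zero for a read access.
 *  It retries until fixfault resolves the fault, returning 0 on
 *  success and -1 if addr is not mapped by any segment or the access
 *  violates the segment's permissions.
 */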
int
fault(ulong addr, int read)
{
	Segment *s;
	char *sps;

	if(up == nil)
		panic("fault: nil up");
	if(up->nlocks.ref)
		print("fault: addr %#p: nlocks %ld\n", addr, up->nlocks.ref);

	sps = up->psstate;
	up->psstate = "Fault";
	spllo();

	m->pfault++;
	for(;;) {
		s = seg(up, addr, 1);		/* leaves s->lk qlocked if seg != nil */
		if(s == 0) {
			up->psstate = sps;
			return -1;
		}

		if(!read && (s->type&SG_RONLY)) {
			qunlock(&s->lk);
			up->psstate = sps;
			return -1;
		}

		if(fixfault(s, addr, read, 1) == 0)	/* qunlocks s->lk */
			break;
	}

	up->psstate = sps;
	return 0;
}

static void
faulterror(char *s, Chan *c, int freemem)
{
	char buf[ERRMAX];

	if(c && c->path){
		snprint(buf, sizeof buf, "%s accessing %s: %s", s, c->path->s, up->errstr);
		s = buf;
	}
	if(up->nerrlab) {
		postnote(up, 1, s, NDebug);
		error(s);
	}
	pexit(s, freemem);
}

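/*
 *  Debugging hook: when checkaddr is set, fixfault calls it as it
 *  resolves a fault on addr2check in a physical segment.
 */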
void	(*checkaddr)(ulong, Segment *, Page *);
ulong	addr2check;

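/*
 *  fixfault makes the page containing addr resident and maps it,
 *  according to the segment type: demand load for text, zero fill
 *  for bss/stack/shared, page in and copy on write for data, and
 *  direct mapping for physical segments.
 *  Called with s->lk held; the lock is released before returning.
 *  Returns 0 on success, -1 if newpage gave up the segment while
 *  waiting for memory.
 */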
int
fixfault(Segment *s, ulong addr, int read, int doputmmu)
{
	int type;
	int ref;
	Pte **p, *etp;
	ulong mmuphys=0, soff;
	Page **pg, *lkp, *new;
	Page *(*fn)(Segment*, ulong);

	addr &= ~(BY2PG-1);
	soff = addr-s->base;
	p = &s->map[soff/PTEMAPMEM];
	if(*p == 0)
		*p = ptealloc();

	etp = *p;
	pg = &etp->pages[(soff&(PTEMAPMEM-1))/BY2PG];
	type = s->type&SG_TYPE;

	if(pg < etp->first)
		etp->first = pg;
	if(pg > etp->last)
		etp->last = pg;

	switch(type) {
	default:
		panic("fault");
		break;

	case SG_TEXT: 			/* Demand load */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		mmuphys = PPN((*pg)->pa) | PTERONLY|PTEVALID;
		(*pg)->modref = PG_REF;
		break;

	case SG_BSS:
	case SG_SHARED:			/* Zero fill on demand */
	case SG_STACK:
		if(*pg == 0) {
			new = newpage(1, &s, addr);
			if(s == 0)
				return -1;

			*pg = new;
		}
		goto common;

	case SG_DATA:
	common:			/* Demand load/pagein/copy on write */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		/*
		 *  It's only possible to copy on write if
		 *  we're the only user of the segment.
		 */
		if(read && conf.copymode == 0 && s->ref == 1) {
			mmuphys = PPN((*pg)->pa)|PTERONLY|PTEVALID;
			(*pg)->modref |= PG_REF;
			break;
		}

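		/*
		 *  About to hand out write access: if anyone else still
		 *  references the page (another process, or copies kept
		 *  in the swap file), make a private copy first; duppage
		 *  preserves a clean copy for the image cache.
		 */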
		lkp = *pg;
		lock(lkp);

		if(lkp->image == &swapimage)
			ref = lkp->ref + swapcount(lkp->daddr);
		else
			ref = lkp->ref;
		if(ref == 1 && lkp->image){
			/* save a copy of the original for the image cache */
			duppage(lkp);
			ref = lkp->ref;
		}
		unlock(lkp);
		if(ref > 1){
			new = newpage(0, &s, addr);
			if(s == 0)
				return -1;
			*pg = new;
			copypage(lkp, *pg);
			putpage(lkp);
		}
		mmuphys = PPN((*pg)->pa) | PTEWRITE | PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;

	case SG_PHYSICAL:
		if(*pg == 0) {
			fn = s->pseg->pgalloc;
			if(fn)
				*pg = (*fn)(s, addr);
			else {
				new = smalloc(sizeof(Page));
				new->va = addr;
				new->pa = s->pseg->pa+(addr-s->base);
				new->ref = 1;
				*pg = new;
			}
		}

		if (checkaddr && addr == addr2check)
			(*checkaddr)(addr, s, *pg);
		mmuphys = PPN((*pg)->pa) |PTEWRITE|PTEUNCACHED|PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;
	}
	qunlock(&s->lk);

	if(doputmmu)
		putmmu(addr, mmuphys, *pg);

	return 0;
}

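/*
 *  pio reads the faulting page into memory, either from the image
 *  backing the segment (demand load) or from the swap file (page in).
 *  s->lk is released around the device read and reacquired afterwards,
 *  so the code must cope with another process or the pager having
 *  changed the page table entry in the meantime.
 */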
void
pio(Segment *s, ulong addr, ulong soff, Page **p)
{
	Page *new;
	KMap *k;
	Chan *c;
	int n, ask;
	char *kaddr;
	ulong daddr;
	Page *loadrec;

retry:
	loadrec = *p;
	if(loadrec == 0) {	/* from a text/data image */
		daddr = s->fstart+soff;
		new = lookpage(s->image, daddr);
		if(new != nil) {
			*p = new;
			return;
		}

		c = s->image->c;
		ask = s->flen-soff;
		if(ask > BY2PG)
			ask = BY2PG;
	}
	else {			/* from a swap image */
		daddr = swapaddr(loadrec);
		new = lookpage(&swapimage, daddr);
		if(new != nil) {
			putswap(loadrec);
			*p = new;
			return;
		}

		c = swapimage.c;
		ask = BY2PG;
	}
	qunlock(&s->lk);

	new = newpage(0, 0, addr);
	k = kmap(new);
	kaddr = (char*)VA(k);

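	/*
	 *  An interrupted read is simply retried; any other error during
	 *  the page-in is fatal for the faulting process (faulterror
	 *  either raises an error or exits it).
	 */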
	while(waserror()) {
		if(strcmp(up->errstr, Eintr) == 0)
			continue;
		kunmap(k);
		putpage(new);
		faulterror(Eioload, c, 0);
	}

	n = devtab[c->type]->read(c, kaddr, ask, daddr);
	if(n != ask)
		faulterror(Eioload, c, 0);
	if(ask < BY2PG)
		memset(kaddr+ask, 0, BY2PG-ask);

	poperror();
	kunmap(k);
	qlock(&s->lk);
	if(loadrec == 0) {	/* This is demand load */
		/*
		 *  race, another proc may have gotten here first while
		 *  s->lk was unlocked
		 */
		if(*p == 0) {
			new->daddr = daddr;
			cachepage(new, s->image);
			*p = new;
		}
		else
			putpage(new);
	}
	else {			/* This is paged out */
		/*
		 *  race, another proc may have gotten here first
		 *  (and the pager may have run on that page) while
		 *  s->lk was unlocked
		 */
		if(*p != loadrec){
			if(!pagedout(*p)){
				/* another process did it for me */
				putpage(new);
				goto done;
			} else {
				/* another process and the pager got in */
				putpage(new);
				goto retry;
			}
		}

		new->daddr = daddr;
		cachepage(new, &swapimage);
		*p = new;
		putswap(loadrec);
	}

done:
	if(s->flushme)
		memset((*p)->cachectl, PG_TXTFLUSH, sizeof((*p)->cachectl));
}

/*
 * Called only in a system call.
 * Returns 1 if the range [addr, addr+len) lies entirely within the
 * process's segments (writable when write is set), 0 otherwise.
 */
int
okaddr(ulong addr, ulong len, int write)
{
	Segment *s;

	/* second test is paranoia, only needed on 64-bit systems */
	if((long)len >= 0 && addr+len >= addr)
		while ((s = seg(up, addr, 0)) != nil &&
		    (!write || !(s->type&SG_RONLY))) {
			if((uvlong)addr+len <= s->top)
				return 1;
			len -= s->top - addr;
			addr = s->top;
		}
	pprint("suicide: invalid address %#lux/%lud in sys call pc=%#lux\n",
		addr, len, userpc());
	return 0;
}

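/*
 *  validaddr is the check applied to addresses passed into system
 *  calls: if the range is not addressable it notes the process and
 *  raises Ebadarg.
 */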
void
validaddr(ulong addr, ulong len, int write)
{
	if(!okaddr(addr, len, write)){
		postnote(up, 1, "sys: bad address in syscall", NDebug);
		error(Ebadarg);
	}
}

/*
 * &s[0] is known to be a valid address.
 */
void*
vmemchr(void *s, int c, int n)
{
	int m;
	ulong a;
	void *t;

	a = (ulong)s;
	while(PGROUND(a) != PGROUND(a+n-1)){
		/* spans pages; handle this page */
		m = BY2PG - (a & (BY2PG-1));
		t = memchr((void*)a, c, m);
		if(t)
			return t;
		a += m;
		n -= m;
		if(a < KZERO)
			validaddr(a, 1, 0);	/* only user addresses need rechecking */
	}

	/* fits in one page */
	return memchr((void*)a, c, n);
}

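/*
 *  seg returns the segment of process p containing addr, or nil if
 *  none does.  With dolock set the segment is returned with its lk
 *  held, after re-checking the bounds under the lock.
 */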
Segment*
seg(Proc *p, ulong addr, int dolock)
{
	Segment **s, **et, *n;

	et = &p->seg[NSEG];
	for(s = p->seg; s < et; s++) {
		n = *s;
		if(n == 0)
			continue;
		if(addr >= n->base && addr < n->top) {
			if(dolock == 0)
				return n;

			qlock(&n->lk);
			if(addr >= n->base && addr < n->top)
				return n;
			qunlock(&n->lk);
		}
	}

	return 0;
}

extern void checkmmu(ulong, ulong);
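/*
 *  checkpages is a debugging aid: it walks every resident page of the
 *  current process and has the machine-dependent checkmmu verify the
 *  mapping of each against the page's physical address.
 */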
void
checkpages(void)
{
	int checked;
	ulong addr, off;
	Pte *p;
	Page *pg;
	Segment **sp, **ep, *s;

	if(up == nil)
		return;

	checked = 0;
	for(sp=up->seg, ep=&up->seg[NSEG]; sp<ep; sp++){
		s = *sp;
		if(s == nil)
			continue;
		qlock(&s->lk);
		for(addr=s->base; addr<s->top; addr+=BY2PG){
			off = addr - s->base;
			p = s->map[off/PTEMAPMEM];
			if(p == 0)
				continue;
			pg = p->pages[(off&(PTEMAPMEM-1))/BY2PG];
			if(pg == 0 || pagedout(pg))
				continue;
			checkmmu(addr, pg->pa);
			checked++;
		}
		qunlock(&s->lk);
	}
	print("%ld %s: checked %d page table entries\n", up->pid, up->text, checked);
}
401