xref: /plan9/sys/src/9/port/fault.c (revision 60014d6756a98ad10929607ca84a1b7488a16cfc)
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"../port/error.h"

/*
 * Handle a page fault at addr.  Return 0 once the fault has been
 * resolved, -1 if addr lies in no segment of the current process
 * or the access was a write to a read-only segment.
 */
int
fault(ulong addr, int read)
{
	Segment *s;
	char *sps;

	if(up == nil)
		panic("fault: nil up");
	if(up->nlocks.ref)
		print("fault: nlocks %ld\n", up->nlocks.ref);

	sps = up->psstate;
	up->psstate = "Fault";
	spllo();

	m->pfault++;
	for(;;) {
		s = seg(up, addr, 1);		/* leaves s->lk qlocked if seg != nil */
		if(s == 0) {
			up->psstate = sps;
			return -1;
		}

		if(!read && (s->type&SG_RONLY)) {
			qunlock(&s->lk);
			up->psstate = sps;
			return -1;
		}

		if(fixfault(s, addr, read, 1) == 0)
			break;
	}

	up->psstate = sps;
	return 0;
}
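
/*
 * Illustrative sketch only, not part of this file: each port's
 * machine-dependent trap handler drives fault() roughly like this.
 * The names faultarch, getfaultaddr, iswrite and usermode are
 * placeholders for whatever the port provides (the x86 port, for
 * example, reads CR2 and the error code pushed by the processor).
 * A fault that cannot be repaired becomes a note for a user
 * process and a panic in kernel mode.
 *
 *	void
 *	faultarch(Ureg *ureg)
 *	{
 *		ulong addr;
 *		int read;
 *		char buf[ERRMAX];
 *
 *		addr = getfaultaddr(ureg);
 *		read = !iswrite(ureg);
 *		if(fault(addr, read) < 0){
 *			if(usermode(ureg)){
 *				snprint(buf, sizeof buf,
 *					"sys: trap: fault %s addr=%#lux",
 *					read? "read": "write", addr);
 *				postnote(up, 1, buf, NDebug);
 *				return;
 *			}
 *			panic("kernel fault: addr=%#lux", addr);
 *		}
 *	}
 */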

/*
 * Report a fault that cannot be satisfied: if the fault happened
 * with an error label to return to (e.g. inside a system call),
 * post a note and raise an error; otherwise kill the process.
 */
static void
faulterror(char *s, Chan *c, int freemem)
{
	char buf[ERRMAX];

	if(c && c->path){
		snprint(buf, sizeof buf, "%s accessing %s: %s", s, c->path->s, up->errstr);
		s = buf;
	}
	if(up->nerrlab) {
		postnote(up, 1, s, NDebug);
		error(s);
	}
	pexit(s, freemem);
}

/* debugging hook: if set, checkaddr is called when addr2check faults in a physical segment */
void	(*checkaddr)(ulong, Segment *, Page *);
ulong	addr2check;

/*
 * Resolve a fault at addr within segment s; s->lk, held by the
 * caller, is released by the time this returns.  Fill in the
 * segment's page table and, if doputmmu is set, install the
 * translation.  Return 0 on success, -1 if the segment went away
 * while waiting for memory.
 */
int
fixfault(Segment *s, ulong addr, int read, int doputmmu)
{
	int type;
	int ref;
	Pte **p, *etp;
	ulong mmuphys=0, soff;
	Page **pg, *lkp, *new;
	Page *(*fn)(Segment*, ulong);

	addr &= ~(BY2PG-1);
	soff = addr-s->base;
	p = &s->map[soff/PTEMAPMEM];
	if(*p == 0)
		*p = ptealloc();

	etp = *p;
	pg = &etp->pages[(soff&(PTEMAPMEM-1))/BY2PG];
	type = s->type&SG_TYPE;

	/* track the extent of the entries in use in this Pte */
	if(pg < etp->first)
		etp->first = pg;
	if(pg > etp->last)
		etp->last = pg;

	switch(type) {
	default:
		panic("fault");
		break;

	case SG_TEXT: 			/* Demand load */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		mmuphys = PPN((*pg)->pa) | PTERONLY|PTEVALID;
		(*pg)->modref = PG_REF;
		break;

	case SG_BSS:
	case SG_SHARED:			/* Zero fill on demand */
	case SG_STACK:
		if(*pg == 0) {
			new = newpage(1, &s, addr);
			if(s == 0)
				return -1;

			*pg = new;
		}
		goto common;

	case SG_DATA:
	common:			/* Demand load/pagein/copy on write */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		/*
		 *  It's only possible to copy on write if
		 *  we're the only user of the segment.
		 */
		if(read && conf.copymode == 0 && s->ref == 1) {
			mmuphys = PPN((*pg)->pa)|PTERONLY|PTEVALID;
			(*pg)->modref |= PG_REF;
			break;
		}

		lkp = *pg;
		lock(lkp);

		/* count the swap map's references as well as the page's */
		if(lkp->image == &swapimage)
			ref = lkp->ref + swapcount(lkp->daddr);
		else
			ref = lkp->ref;
		if(ref > 1) {	/* shared: make a private copy */
			unlock(lkp);

			if(swapfull()){
				qunlock(&s->lk);
				pprint("swap space full\n");
				faulterror(Enoswap, nil, 1);
			}

			new = newpage(0, &s, addr);
			if(s == 0)
				return -1;
			*pg = new;
			copypage(lkp, *pg);
			putpage(lkp);
		}
		else {
			/* save a copy of the original for the image cache */
			if(lkp->image && !swapfull())
				duppage(lkp);

			unlock(lkp);
		}
		mmuphys = PPN((*pg)->pa) | PTEWRITE | PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;

	case SG_PHYSICAL:
		if(*pg == 0) {
			fn = s->pseg->pgalloc;
			if(fn)
				*pg = (*fn)(s, addr);
			else {
				new = smalloc(sizeof(Page));
				new->va = addr;
				new->pa = s->pseg->pa+(addr-s->base);
				new->ref = 1;
				*pg = new;
			}
		}

		if (checkaddr && addr == addr2check)
			(*checkaddr)(addr, s, *pg);
		mmuphys = PPN((*pg)->pa) |PTEWRITE|PTEUNCACHED|PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;
	}
	qunlock(&s->lk);

	if(doputmmu)
		putmmu(addr, mmuphys, *pg);

	return 0;
}

/*
 * Page in the page at addr (offset soff in segment s), either by
 * demand loading it from the segment's image or by reading it back
 * from the swap device.  Called with s->lk held; the lock is
 * dropped around the I/O and reacquired.
 */
void
pio(Segment *s, ulong addr, ulong soff, Page **p)
{
	Page *new;
	KMap *k;
	Chan *c;
	int n, ask;
	char *kaddr;
	ulong daddr;
	Page *loadrec;

retry:
	loadrec = *p;
	if(loadrec == 0) {	/* from a text/data image */
		daddr = s->fstart+soff;
		new = lookpage(s->image, daddr);
		if(new != nil) {
			*p = new;
			return;
		}
	}
	else {			/* from a swap image */
		daddr = swapaddr(loadrec);
		new = lookpage(&swapimage, daddr);
		if(new != nil) {
			putswap(loadrec);
			*p = new;
			return;
		}
	}

	qunlock(&s->lk);

	new = newpage(0, 0, addr);
	k = kmap(new);
	kaddr = (char*)VA(k);

	if(loadrec == 0) {			/* This is demand load */
		c = s->image->c;
		while(waserror()) {
			if(strcmp(up->errstr, Eintr) == 0)
				continue;
			kunmap(k);
			putpage(new);
			faulterror("sys: demand load I/O error", c, 0);
		}

		ask = s->flen-soff;
		if(ask > BY2PG)
			ask = BY2PG;

		n = devtab[c->type]->read(c, kaddr, ask, daddr);
		if(n != ask)
			faulterror(Eioload, c, 0);
		if(ask < BY2PG)
			memset(kaddr+ask, 0, BY2PG-ask);

		poperror();
		kunmap(k);
		qlock(&s->lk);

		/*
		 *  race, another proc may have gotten here first while
		 *  s->lk was unlocked
		 */
		if(*p == 0) {
			new->daddr = daddr;
			cachepage(new, s->image);
			*p = new;
		}
		else
			putpage(new);
	}
	else {				/* This is paged out */
		c = swapimage.c;
		if(waserror()) {
			kunmap(k);
			putpage(new);
			qlock(&s->lk);
			qunlock(&s->lk);
			faulterror("sys: page in I/O error", c, 0);
		}

		n = devtab[c->type]->read(c, kaddr, BY2PG, daddr);
		if(n != BY2PG)
			faulterror(Eioload, c, 0);

		poperror();
		kunmap(k);
		qlock(&s->lk);

		/*
		 *  race, another proc may have gotten here first
		 *  (and the pager may have run on that page) while
		 *  s->lk was unlocked
		 */
		if(*p != loadrec){
			if(!pagedout(*p)){
				/* another process did it for me */
				putpage(new);
				goto done;
			} else {
				/* another process and the pager got in */
				putpage(new);
				goto retry;
			}
		}

		new->daddr = daddr;
		cachepage(new, &swapimage);
		*p = new;
		putswap(loadrec);
	}

done:
	if(s->flushme)
		memset((*p)->cachectl, PG_TXTFLUSH, sizeof((*p)->cachectl));
}

/*
 * Called only in a system call: check that the user range
 * [addr, addr+len) lies within the process's segments and, if
 * write is set, that it is writable.
 */
int
okaddr(ulong addr, ulong len, int write)
{
	Segment *s;

	if((long)len >= 0) {
		for(;;) {
			s = seg(up, addr, 0);
			if(s == 0 || (write && (s->type&SG_RONLY)))
				break;

			if(addr+len > s->top) {
				len -= s->top - addr;
				addr = s->top;
				continue;
			}
			return 1;
		}
	}
	pprint("suicide: invalid address %#lux/%lud in sys call pc=%#lux\n", addr, len, userpc());
	return 0;
}

/* like okaddr, but kill the process if the address range is bad */
void
validaddr(ulong addr, ulong len, int write)
{
	if(!okaddr(addr, len, write))
		pexit("Suicide", 0);
}
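
/*
 * Illustrative sketch only, not part of this file: system call code
 * is expected to validate user pointers with validaddr/okaddr before
 * dereferencing them, and to bound a NUL-terminated string with
 * vmemchr, which revalidates as it crosses page boundaries.  The
 * name syswidget, its argument layout (arg[0]/arg[1] describe a
 * buffer the call will write, arg[2] points at a string) and the
 * 64K string bound are all hypothetical.
 *
 *	long
 *	syswidget(ulong *arg)
 *	{
 *		char *s, *e;
 *
 *		validaddr(arg[0], arg[1], 1);
 *		validaddr(arg[2], 1, 0);
 *		s = (char*)arg[2];
 *		e = vmemchr(s, 0, 1<<16);
 *		if(e == nil)
 *			error(Ebadarg);
 *		...
 *	}
 */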

/*
 * &s[0] is known to be a valid address; memchr across user memory,
 * validating each further page before it is touched.
 */
void*
vmemchr(void *s, int c, int n)
{
	int m;
	ulong a;
	void *t;

	a = (ulong)s;
	while(PGROUND(a) != PGROUND(a+n-1)){
		/* spans pages; handle this page */
		m = BY2PG - (a & (BY2PG-1));
		t = memchr((void*)a, c, m);
		if(t)
			return t;
		a += m;
		n -= m;
		if(a < KZERO)
			validaddr(a, 1, 0);
	}

	/* fits in one page */
	return memchr((void*)a, c, n);
}

/*
 * Find the segment of process p containing addr; with dolock set,
 * return with the segment's lk held.
 */
Segment*
seg(Proc *p, ulong addr, int dolock)
{
	Segment **s, **et, *n;

	et = &p->seg[NSEG];
	for(s = p->seg; s < et; s++) {
		n = *s;
		if(n == 0)
			continue;
		if(addr >= n->base && addr < n->top) {
			if(dolock == 0)
				return n;

			qlock(&n->lk);
			/* recheck: the segment may have changed while we waited for the lock */
			if(addr >= n->base && addr < n->top)
				return n;
			qunlock(&n->lk);
		}
	}

	return 0;
}

extern void checkmmu(ulong, ulong);

/*
 * Debugging check: for every resident page of the current process,
 * ask the machine-dependent checkmmu to verify that the MMU mapping
 * agrees with the page tables.
 */
void
checkpages(void)
{
	int checked;
	ulong addr, off;
	Pte *p;
	Page *pg;
	Segment **sp, **ep, *s;

	if(up == nil)
		return;

	checked = 0;
	for(sp=up->seg, ep=&up->seg[NSEG]; sp<ep; sp++){
		s = *sp;
		if(s == nil)
			continue;
		qlock(&s->lk);
		for(addr=s->base; addr<s->top; addr+=BY2PG){
			off = addr - s->base;
			p = s->map[off/PTEMAPMEM];
			if(p == 0)
				continue;
			pg = p->pages[(off&(PTEMAPMEM-1))/BY2PG];
			if(pg == 0 || pagedout(pg))
				continue;
			checkmmu(addr, pg->pa);
			checked++;
		}
		qunlock(&s->lk);
	}
	print("%ld %s: checked %d page table entries\n", up->pid, up->text, checked);
}
429