/* /plan9/sys/src/9/port/fault.c */
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"../port/error.h"

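/*
 * Handle a page fault on addr for the current process: find the
 * containing segment, refuse writes to read-only segments, and let
 * fixfault install the page.  Returns 0 on success, -1 if the
 * address is not mapped or the access is not permitted.
 */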
int
fault(ulong addr, int read)
{
	Segment *s;
	char *sps;

	if(up == nil)
		panic("fault: nil up");
	if(up->nlocks.ref)
		print("fault: addr %#p: nlocks %ld\n", addr, up->nlocks.ref);

	sps = up->psstate;
	up->psstate = "Fault";
	spllo();

	m->pfault++;
	for(;;) {
		s = seg(up, addr, 1);		/* leaves s->lk qlocked if seg != nil */
		if(s == 0) {
			up->psstate = sps;
			return -1;
		}

		if(!read && (s->type&SG_RONLY)) {
			qunlock(&s->lk);
			up->psstate = sps;
			return -1;
		}

		if(fixfault(s, addr, read, 1) == 0)	/* qunlocks s->lk */
			break;
	}

	up->psstate = sps;
	return 0;
}

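/*
 * Report an i/o error encountered while servicing a fault: prefix the
 * message with the channel's path, then post a note and raise an error
 * if the process has an error label to unwind to, otherwise exit the
 * process.
 */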
static void
faulterror(char *s, Chan *c, int freemem)
{
	char buf[ERRMAX];

	if(c && c->path){
		snprint(buf, sizeof buf, "%s accessing %s: %s", s, c->path->s, up->errstr);
		s = buf;
	}
	if(up->nerrlab) {
		postnote(up, 1, s, NDebug);
		error(s);
	}
	pexit(s, freemem);
}

void	(*checkaddr)(ulong, Segment *, Page *);
ulong	addr2check;

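/*
 * Resolve a fault on addr within segment s (s->lk is held on entry
 * and released before return): demand-load text pages, zero-fill or
 * page in bss/stack/shared/data pages, copy on write when the page is
 * shared, and map physical segments directly.  If doputmmu is set,
 * the new translation is entered into the MMU.  Returns 0 on success
 * and -1 when the fault must be retried.
 */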
int
fixfault(Segment *s, ulong addr, int read, int doputmmu)
{
	int type;
	int ref;
	Pte **p, *etp;
	ulong mmuphys=0, soff;
	Page **pg, *lkp, *new;
	Page *(*fn)(Segment*, ulong);

	addr &= ~(BY2PG-1);
	soff = addr-s->base;
	p = &s->map[soff/PTEMAPMEM];
	if(*p == 0)
		*p = ptealloc();

	etp = *p;
	pg = &etp->pages[(soff&(PTEMAPMEM-1))/BY2PG];
	type = s->type&SG_TYPE;

	if(pg < etp->first)
		etp->first = pg;
	if(pg > etp->last)
		etp->last = pg;

	switch(type) {
	default:
		panic("fault");
		break;

	case SG_TEXT: 			/* Demand load */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		mmuphys = PPN((*pg)->pa) | PTERONLY|PTEVALID;
		(*pg)->modref = PG_REF;
		break;

	case SG_BSS:
	case SG_SHARED:			/* Zero fill on demand */
	case SG_STACK:
		if(*pg == 0) {
			new = newpage(1, &s, addr);
			if(s == 0)
				return -1;

			*pg = new;
		}
		goto common;

	case SG_DATA:
	common:			/* Demand load/pagein/copy on write */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		/*
		 *  It's only possible to copy on write if
		 *  we're the only user of the segment.
		 */
		if(read && conf.copymode == 0 && s->ref == 1) {
			mmuphys = PPN((*pg)->pa)|PTERONLY|PTEVALID;
			(*pg)->modref |= PG_REF;
			break;
		}

		lkp = *pg;
		lock(lkp);

		if(lkp->image == &swapimage)
			ref = lkp->ref + swapcount(lkp->daddr);
		else
			ref = lkp->ref;
		if(ref == 1 && lkp->image){
			/* save a copy of the original for the image cache */
			duppage(lkp);
			ref = lkp->ref;
		}
		unlock(lkp);
		if(ref > 1){
			new = newpage(0, &s, addr);
			if(s == 0)
				return -1;
			*pg = new;
			copypage(lkp, *pg);
			putpage(lkp);
		}
		mmuphys = PPN((*pg)->pa) | PTEWRITE | PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;

	case SG_PHYSICAL:
		if(*pg == 0) {
			fn = s->pseg->pgalloc;
			if(fn)
				*pg = (*fn)(s, addr);
			else {
				new = smalloc(sizeof(Page));
				new->va = addr;
				new->pa = s->pseg->pa+(addr-s->base);
				new->ref = 1;
				*pg = new;
			}
		}

		if (checkaddr && addr == addr2check)
			(*checkaddr)(addr, s, *pg);
		mmuphys = PPN((*pg)->pa) |PTEWRITE|PTEUNCACHED|PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;
	}
	qunlock(&s->lk);

	if(doputmmu)
		putmmu(addr, mmuphys, *pg);

	return 0;
}

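/*
 * Read the faulting page into a newly allocated page frame, either
 * from the segment's text/data image or from the swap file.  s->lk is
 * held on entry; it is released around the device read and reacquired,
 * and the code then rechecks whether another process installed the
 * page first.
 */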
void
pio(Segment *s, ulong addr, ulong soff, Page **p)
{
	Page *new;
	KMap *k;
	Chan *c;
	int n, ask;
	char *kaddr;
	ulong daddr;
	Page *loadrec;

retry:
	loadrec = *p;
	if(loadrec == 0) {	/* from a text/data image */
		daddr = s->fstart+soff;
		new = lookpage(s->image, daddr);
		if(new != nil) {
			*p = new;
			return;
		}

		c = s->image->c;
		ask = s->flen-soff;
		if(ask > BY2PG)
			ask = BY2PG;
	}
	else {			/* from a swap image */
		daddr = swapaddr(loadrec);
		new = lookpage(&swapimage, daddr);
		if(new != nil) {
			putswap(loadrec);
			*p = new;
			return;
		}

		c = swapimage.c;
		ask = BY2PG;
	}
	qunlock(&s->lk);

	new = newpage(0, 0, addr);
	k = kmap(new);
	kaddr = (char*)VA(k);

	while(waserror()) {
		if(strcmp(up->errstr, Eintr) == 0)
			continue;
		kunmap(k);
		putpage(new);
		faulterror(Eioload, c, 0);
	}

	n = devtab[c->type]->read(c, kaddr, ask, daddr);
	if(n != ask)
		faulterror(Eioload, c, 0);
	if(ask < BY2PG)
		memset(kaddr+ask, 0, BY2PG-ask);

	poperror();
	kunmap(k);
	qlock(&s->lk);
	if(loadrec == 0) {	/* This is demand load */
		/*
		 *  race, another proc may have gotten here first while
		 *  s->lk was unlocked
		 */
		if(*p == 0) {
			new->daddr = daddr;
			cachepage(new, s->image);
			*p = new;
		}
		else
			putpage(new);
	}
	else {			/* This is paged out */
		/*
		 *  race, another proc may have gotten here first
		 *  (and the pager may have run on that page) while
		 *  s->lk was unlocked
		 */
		if(*p != loadrec){
			if(!pagedout(*p)){
				/* another process did it for me */
				putpage(new);
				goto done;
			} else {
				/* another process and the pager got in */
				putpage(new);
				goto retry;
			}
		}

		new->daddr = daddr;
		cachepage(new, &swapimage);
		*p = new;
		putswap(loadrec);
	}

done:
	if(s->flushme)
		memset((*p)->cachectl, PG_TXTFLUSH, sizeof((*p)->cachectl));
}

/*
 * Called only in a system call
 */
int
okaddr(ulong addr, ulong len, int write)
{
	Segment *s;

	if((long)len >= 0) {
		for(;;) {
			s = seg(up, addr, 0);
			if(s == 0 || (write && (s->type&SG_RONLY)))
				break;

			if(addr+len > s->top) {
				len -= s->top - addr;
				addr = s->top;
				continue;
			}
			return 1;
		}
	}
	pprint("suicide: invalid address %#lux/%lud in sys call pc=%#lux\n", addr, len, userpc());
	return 0;
}

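/*
 * Like okaddr, but post a note and raise Ebadarg when the range
 * is not addressable.
 */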
void
validaddr(ulong addr, ulong len, int write)
{
	if(!okaddr(addr, len, write)){
		postnote(up, 1, "sys: bad address in syscall", NDebug);
		error(Ebadarg);
	}
}

/*
 * &s[0] is known to be a valid address.
 */
void*
vmemchr(void *s, int c, int n)
{
	int m;
	ulong a;
	void *t;

	a = (ulong)s;
	while(PGROUND(a) != PGROUND(a+n-1)){
		/* spans pages; handle this page */
		m = BY2PG - (a & (BY2PG-1));
		t = memchr((void*)a, c, m);
		if(t)
			return t;
		a += m;
		n -= m;
		if(a < KZERO)
			validaddr(a, 1, 0);
	}

	/* fits in one page */
	return memchr((void*)a, c, n);
}

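/*
 * Return the segment of process p containing addr, or 0 if there is
 * none.  If dolock is set, the segment's lk is qlocked and the bounds
 * are rechecked before returning.
 */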
Segment*
seg(Proc *p, ulong addr, int dolock)
{
	Segment **s, **et, *n;

	et = &p->seg[NSEG];
	for(s = p->seg; s < et; s++) {
		n = *s;
		if(n == 0)
			continue;
		if(addr >= n->base && addr < n->top) {
			if(dolock == 0)
				return n;

			qlock(&n->lk);
			if(addr >= n->base && addr < n->top)
				return n;
			qunlock(&n->lk);
		}
	}

	return 0;
}

extern void checkmmu(ulong, ulong);
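/*
 * Consistency check: have checkmmu verify the translation of every
 * resident page in the current process's segments.
 */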
void
checkpages(void)
{
	int checked;
	ulong addr, off;
	Pte *p;
	Page *pg;
	Segment **sp, **ep, *s;

	if(up == nil)
		return;

	checked = 0;
	for(sp=up->seg, ep=&up->seg[NSEG]; sp<ep; sp++){
		s = *sp;
		if(s == nil)
			continue;
		qlock(&s->lk);
		for(addr=s->base; addr<s->top; addr+=BY2PG){
			off = addr - s->base;
			p = s->map[off/PTEMAPMEM];
			if(p == 0)
				continue;
			pg = p->pages[(off&(PTEMAPMEM-1))/BY2PG];
			if(pg == 0 || pagedout(pg))
				continue;
			checkmmu(addr, pg->pa);
			checked++;
		}
		qunlock(&s->lk);
	}
	print("%ld %s: checked %d page table entries\n", up->pid, up->text, checked);
}