xref: /plan9-contrib/sys/src/9k/port/fault.c (revision 094d68186d4cdde21fdab9786d6c843a03693e4e)
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"../port/error.h"

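/*
 * Handle a page fault at addr for the current process: find the
 * segment containing addr, refuse a write to a read-only segment,
 * and let fixfault make the page resident, retrying on allocation
 * failure (any color is accepted after a few tries).
 * Returns 0 on success, -1 if the access cannot be satisfied.
 */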
int
fault(uintptr addr, int read)
{
	Segment *s;
	char *sps;
	int i, color;

	if(up == nil)
		panic("fault: nil up");
	if(up->nlocks)
		print("fault: addr %#p: nlocks %d\n", addr, up->nlocks);

	sps = up->psstate;
	up->psstate = "Fault";
	spllo();

	m->pfault++;
	for(i = 0;; i++) {
		s = seg(up, addr, 1);		/* leaves s->lk qlocked if seg != nil */
		if(s == nil) {
			up->psstate = sps;
			return -1;
		}

		if(!read && (s->type&SG_RONLY)) {
			qunlock(&s->lk);
			up->psstate = sps;
			return -1;
		}

		color = s->color;
		if(i > 3)
			color = -1;
		if(fixfault(s, addr, read, 1, color) == 0)	/* qunlocks s->lk */
			break;

		/*
		 * See the comment in newpage that describes
		 * how to get here.
		 */
	}

	up->psstate = sps;
	return 0;
}

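/*
 * Report a failed demand load: if an error label is set, post a note
 * and raise an error so the system call fails; otherwise kill the
 * process.  The channel, if any, names the file being paged from.
 */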
static void
faulterror(char *s, Chan *c, int freemem)
{
	char buf[ERRMAX];

	if(c && c->path){
		snprint(buf, sizeof buf, "%s accessing %s: %s", s, c->path->s, up->errstr);
		s = buf;
	}
	if(up->nerrlab) {
		postnote(up, 1, s, NDebug);
		error(s);
	}
	pexit(s, freemem);
}

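/*
 * Debugging hook: when checkaddr is set, fixfault calls it for a
 * physical-segment fault at addr2check.
 */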
void	(*checkaddr)(ulong, Segment *, Page *);
ulong	addr2check;

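/*
 * Make the page at addr in segment s resident, according to the
 * segment type: demand load text read-only; zero fill bss, shared
 * and stack pages; demand load or copy on write data; map physical
 * segments directly.  Called with s->lk held; on success the lock is
 * released, the translation is installed if dommuput is set, and 0
 * is returned.  Returns -1 if newpage could not supply a page.
 */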
int
fixfault(Segment *s, uintptr addr, int read, int dommuput, int color)
{
	int type;
	int ref;
	Pte **p, *etp;
	Page **pg, *lkp, *new;
	Page *(*fn)(Segment*, uintptr);
	uintptr mmuphys, pgsize, soff;

	pgsize = segpgsize(s);
	addr &= ~(pgsize-1);
	soff = addr-s->base;
	p = &s->map[soff/s->ptemapmem];
	if(*p == nil)
		*p = ptealloc();

	etp = *p;
	pg = &etp->pages[(soff&(s->ptemapmem-1))>>s->lg2pgsize];
	type = s->type&SG_TYPE;

	if(pg < etp->first)
		etp->first = pg;
	if(pg > etp->last)
		etp->last = pg;

	mmuphys = 0;
	switch(type) {
	default:
		panic("fault");
		break;

	case SG_TEXT: 			/* Demand load */
		if(pagedout(*pg))
			pio(s, addr, soff, pg, color);

		mmuphys = PPN((*pg)->pa) | PTERONLY|PTEVALID;
		(*pg)->modref = PG_REF;
		break;

	case SG_BSS:
	case SG_SHARED:			/* Zero fill on demand */
	case SG_STACK:
		if(*pg == nil) {
			new = newpage(1, s, addr, s->lg2pgsize, color, 1);
			if(new == nil)
				return -1;

			*pg = new;
		}
		goto common;

	case SG_DATA:
	common:			/* Demand load/pagein/copy on write */
		if(pagedout(*pg))
			pio(s, addr, soff, pg, color);

		/*
		 *  It's only possible to copy on write if
		 *  we're the only user of the segment.
		 */
		if(read && sys->copymode == 0 && s->ref == 1) {
			mmuphys = PPN((*pg)->pa)|PTERONLY|PTEVALID;
			(*pg)->modref |= PG_REF;
			break;
		}

		lkp = *pg;
		lock(lkp);

		ref = lkp->ref;
		if(ref <= 0)
			panic("fault: page->ref %d <= 0", ref);

		if(ref == 1 && lkp->image != nil){
			duppage(lkp);
			ref = lkp->ref;
		}
		unlock(lkp);

		if(ref > 1) {
			new = newpage(0, s, addr, s->lg2pgsize, color, 1);
			if(new == nil)
				return -1;
			*pg = new;
			copypage(lkp, *pg);
			putpage(lkp);
		}
		mmuphys = PPN((*pg)->pa) | PTEWRITE | PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;

	case SG_PHYSICAL:
		if(*pg == nil) {
			fn = s->pseg->pgalloc;
			if(fn)
				*pg = (*fn)(s, addr);
			else {
				new = smalloc(sizeof(Page));
				new->va = addr;
				new->pa = s->pseg->pa+(addr-s->base);
				new->ref = 1;
				new->lg2size = s->pseg->lg2pgsize;
				if(new->lg2size == 0)
					new->lg2size = PGSHFT;	/* TO DO */
				*pg = new;
			}
		}

		if (checkaddr && addr == addr2check)
			(*checkaddr)(addr, s, *pg);
		mmuphys = PPN((*pg)->pa) | PTEVALID;
		if((s->pseg->attr & SG_RONLY) == 0)
			mmuphys |= PTEWRITE;
		if((s->pseg->attr & SG_CACHED) == 0)
			mmuphys |= PTEUNCACHED;
		(*pg)->modref = PG_MOD|PG_REF;
		break;
	}
	qunlock(&s->lk);

	if(dommuput)
		mmuput(addr, mmuphys, *pg);

	return 0;
}

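/*
 * Demand load the page at addr (segment offset soff) from the
 * segment's backing image: use a copy cached by the image if there
 * is one, otherwise read the page from the image's channel into a
 * new page, zeroing anything beyond the end of the file.  s->lk is
 * released around the read, so another process may install the page
 * first, in which case the freshly read copy is discarded.
 */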
void
pio(Segment *s, uintptr addr, uintptr soff, Page **p, int color)
{
	Page *new;
	KMap *k;
	Chan *c;
	int n, ask;
	char *kaddr;
	ulong daddr;
	Page *loadrec;
	uintptr pgsize;

	pgsize = segpgsize(s);
	loadrec = *p;
	if(!pagedout(*p) || loadrec != nil)
		return;
	/* demand load from a text/data image */
	daddr = s->fstart+soff;
	new = lookpage(s->image, daddr);
	if(new != nil) {
		*p = new;
		return;
	}

	c = s->image->c;
	ask = s->flen-soff;
	if(ask > pgsize)
		ask = pgsize;
	qunlock(&s->lk);

	new = newpage(0, s, addr, s->lg2pgsize, color, 0);
	if(new == nil)
		panic("pio");	/* can't happen, s wasn't locked */

	k = kmap(new);
	kaddr = (char*)VA(k);

	while(waserror()) {
		if(strcmp(up->errstr, Eintr) == 0)
			continue;
		kunmap(k);
		putpage(new);
		faulterror(Eioload, c, 0);
	}

	n = c->dev->read(c, kaddr, ask, daddr);
	if(n != ask)
		faulterror(Eioload, c, 0);
	if(ask < pgsize)
		memset(kaddr+ask, 0, pgsize-ask);

	poperror();
	kunmap(k);

	qlock(&s->lk);
	/*
	 *  race, another proc may have read the page in while
	 *  s->lk was unlocked
	 */
	if(*p == nil) {
		new->daddr = daddr;
		cachepage(new, s->image);
		*p = new;
	}
	else
		putpage(new);

	if(s->flushme)
		mmucachectl(*p, PG_TXTFLUSH);
}

/*
 * Called only in a system call
 */
int
okaddr(uintptr addr, long len, int write)
{
	Segment *s;

	/* second test is paranoia only needed on 64-bit systems */
	if(len >= 0 && addr+len >= addr)
		while ((s = seg(up, addr, 0)) != nil &&
		    (!write || !(s->type&SG_RONLY))) {
			if(addr+len <= s->top)
				return 1;
			len -= s->top - addr;
			addr = s->top;
		}
	return 0;
}

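/*
 * As okaddr, but kill the process if the range is not addressable.
 */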
void*
validaddr(void* addr, long len, int write)
{
	if(!okaddr(PTR2UINT(addr), len, write)){
		pprint("suicide: invalid address %#p/%ld in sys call pc=%#p\n",
			addr, len, userpc(nil));
		pexit("Suicide", 0);
	}

	return UINT2PTR(addr);
}

/*
 * &s[0] is known to be a valid address.
 */
void*
vmemchr(void *s, int c, int n)
{
	int r;
	uintptr a;
	void *t;

	a = PTR2UINT(s);
	while(ROUNDUP(a, PGSZ) != ROUNDUP(a+n-1, PGSZ)){
		/* spans pages; handle this page */
		r = PGSZ - (a & (PGSZ-1));
		t = memchr(UINT2PTR(a), c, r);
		if(t)
			return t;
		a += r;
		n -= r;
		if(!iskaddr(a))
			validaddr(UINT2PTR(a), 1, 0);
	}

	/* fits in one page */
	return memchr(UINT2PTR(a), c, n);
}

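/*
 * Return the segment of process p containing addr, or nil if there
 * is none.  With dolock set, the segment is returned with its lk
 * held, the bounds having been rechecked under the lock.
 */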
Segment*
seg(Proc *p, uintptr addr, int dolock)
{
	Segment **s, **et, *n;

	et = &p->seg[NSEG];
	for(s = p->seg; s < et; s++) {
		n = *s;
		if(n == nil)
			continue;
		if(addr >= n->base && addr < n->top) {
			if(dolock == 0)
				return n;

			qlock(&n->lk);
			if(addr >= n->base && addr < n->top)
				return n;
			qunlock(&n->lk);
		}
	}

	return 0;
}

extern void checkmmu(uintptr, uintmem);
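/*
 * Consistency check: walk the current process's segments and have
 * checkmmu verify the hardware translation of every resident page.
 */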
void
checkpages(void)
{
	int checked;
	uintptr addr, off;
	Pte *p;
	Page *pg;
	Segment **sp, **ep, *s;
	uint pgsize;

	if(up == nil)
		return;

	checked = 0;
	for(sp=up->seg, ep=&up->seg[NSEG]; sp<ep; sp++){
		s = *sp;
		if(s == nil)
			continue;
		qlock(&s->lk);
		pgsize = segpgsize(s);
		for(addr=s->base; addr<s->top; addr+=pgsize){
			off = addr - s->base;
			p = s->map[off/s->ptemapmem];
			if(p == nil)
				continue;
			pg = p->pages[(off&(s->ptemapmem-1))/pgsize];
			if(pg == nil || pagedout(pg))
				continue;
			checkmmu(addr, pg->pa);
			checked++;
		}
		qunlock(&s->lk);
	}
	print("%d %s: checked %d page table entries\n", up->pid, up->text, checked);
}
390