xref: /plan9/sys/src/9/port/fault.c (revision 6a9fc400c33447ef5e1cda7185cb4de2c8e8010e)
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"../port/error.h"

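/*
 * Resolve a page fault at addr for the current process (up);
 * read is non-zero for a read access.  Returns 0 once the page
 * is mapped, -1 if no segment contains addr or a write touches
 * a read-only segment.
 */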
int
fault(ulong addr, int read)
{
	Segment *s;
	char *sps;

	sps = up->psstate;
	up->psstate = "Fault";
	spllo();

	m->pfault++;
	for(;;) {
		s = seg(up, addr, 1);		/* leaves s->lk qlocked if seg != nil */
		if(s == 0) {
			up->psstate = sps;
			return -1;
		}

		if(!read && (s->type&SG_RONLY)) {
			qunlock(&s->lk);
			up->psstate = sps;
			return -1;
		}

		if(fixfault(s, addr, read, 1) == 0)
			break;
	}

	up->psstate = sps;
	return 0;
}

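/*
 * Report an I/O error during fault handling: post a note and
 * raise an error if the process has an error label to catch it,
 * otherwise terminate the process.
 */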
static void
faulterror(char *s, Chan *c, int freemem)
{
	char buf[ERRMAX];

	if(c && c->name){
		snprint(buf, sizeof buf, "%s accessing %s: %s", s, c->name->s, up->errstr);
		s = buf;
	}
	if(up->nerrlab) {
		postnote(up, 1, s, NDebug);
		error(s);
	}
	pexit(s, freemem);
}

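/*
 * Make the page at addr in segment s resident: demand-load text,
 * zero-fill bss/shared/stack, page in or copy-on-write data, or
 * map a physical segment.  Called with s->lk held; releases it
 * before returning.  Returns 0 on success, -1 if the segment
 * disappeared while waiting for a free page.
 */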
int
fixfault(Segment *s, ulong addr, int read, int doputmmu)
{
	int type;
	int ref;
	Pte **p, *etp;
	ulong mmuphys=0, soff;
	Page **pg, *lkp, *new;
	Page *(*fn)(Segment*, ulong);

	addr &= ~(BY2PG-1);
	soff = addr-s->base;
	p = &s->map[soff/PTEMAPMEM];
	if(*p == 0)
		*p = ptealloc();

	etp = *p;
	pg = &etp->pages[(soff&(PTEMAPMEM-1))/BY2PG];
	type = s->type&SG_TYPE;

	if(pg < etp->first)
		etp->first = pg;
	if(pg > etp->last)
		etp->last = pg;

	switch(type) {
	default:
		panic("fault");
		break;

	case SG_TEXT: 			/* Demand load */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		mmuphys = PPN((*pg)->pa) | PTERONLY|PTEVALID;
		(*pg)->modref = PG_REF;
		break;

	case SG_BSS:
	case SG_SHARED:			/* Zero fill on demand */
	case SG_STACK:
		if(*pg == 0) {
			new = newpage(1, &s, addr);
			if(s == 0)
				return -1;

			*pg = new;
		}
		goto common;

	case SG_DATA:
	common:			/* Demand load/pagein/copy on write */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		/*
		 *  It's only possible to copy on write if
		 *  we're the only user of the segment.
		 */
		if(read && conf.copymode == 0 && s->ref == 1) {
			mmuphys = PPN((*pg)->pa)|PTERONLY|PTEVALID;
			(*pg)->modref |= PG_REF;
			break;
		}

		lkp = *pg;
		lock(lkp);

		if(lkp->image == &swapimage)
			ref = lkp->ref + swapcount(lkp->daddr);
		else
			ref = lkp->ref;
		if(ref > 1) {
			unlock(lkp);

			if(swapfull()){
				qunlock(&s->lk);
				pprint("swap space full\n");
				faulterror(Enoswap, nil, 1);
			}

			new = newpage(0, &s, addr);
			if(s == 0)
				return -1;
			*pg = new;
			copypage(lkp, *pg);
			putpage(lkp);
		}
		else {
			/* save a copy of the original for the image cache */
			if(lkp->image && !swapfull())
				duppage(lkp);

			unlock(lkp);
		}
		mmuphys = PPN((*pg)->pa) | PTEWRITE | PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;

	case SG_PHYSICAL:
		if(*pg == 0) {
			fn = s->pseg->pgalloc;
			if(fn)
				*pg = (*fn)(s, addr);
			else {
				new = smalloc(sizeof(Page));
				new->va = addr;
				new->pa = s->pseg->pa+(addr-s->base);
				new->ref = 1;
				*pg = new;
			}
		}

		mmuphys = PPN((*pg)->pa) | PTEWRITE|PTEUNCACHED|PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;
	}
	qunlock(&s->lk);

	if(doputmmu)
		putmmu(addr, mmuphys, *pg);

	return 0;
}

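/*
 * Read the missing page at segment offset soff, either from the
 * segment's text/data image or from swap, and install it in *p.
 * s->lk is released around the device read and reacquired, so the
 * result is re-checked against pages filled in by other processes.
 */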
void
pio(Segment *s, ulong addr, ulong soff, Page **p)
{
	Page *new;
	KMap *k;
	Chan *c;
	int n, ask;
	char *kaddr;
	ulong daddr;
	Page *loadrec;

retry:
	loadrec = *p;
	if(loadrec == 0) {	/* from a text/data image */
		daddr = s->fstart+soff;
		new = lookpage(s->image, daddr);
		if(new != nil) {
			*p = new;
			return;
		}
	}
	else {			/* from a swap image */
		daddr = swapaddr(loadrec);
		new = lookpage(&swapimage, daddr);
		if(new != nil) {
			putswap(loadrec);
			*p = new;
			return;
		}
	}

	qunlock(&s->lk);

	new = newpage(0, 0, addr);
	k = kmap(new);
	kaddr = (char*)VA(k);

	if(loadrec == 0) {			/* This is demand load */
		c = s->image->c;
		while(waserror()) {
			if(strcmp(up->errstr, Eintr) == 0)
				continue;
			kunmap(k);
			putpage(new);
			faulterror("sys: demand load I/O error", c, 0);
		}

		ask = s->flen-soff;
		if(ask > BY2PG)
			ask = BY2PG;

		n = devtab[c->type]->read(c, kaddr, ask, daddr);
		if(n != ask)
			faulterror(Eioload, c, 0);
		if(ask < BY2PG)
			memset(kaddr+ask, 0, BY2PG-ask);

		poperror();
		kunmap(k);
		qlock(&s->lk);

		/*
		 *  race, another proc may have gotten here first while
		 *  s->lk was unlocked
		 */
		if(*p == 0) {
			new->daddr = daddr;
			cachepage(new, s->image);
			*p = new;
		}
		else
			putpage(new);
	}
	else {				/* This is paged out */
		c = swapimage.c;
		if(waserror()) {
			kunmap(k);
			putpage(new);
			qlock(&s->lk);
			qunlock(&s->lk);
			faulterror("sys: page in I/O error", c, 0);
		}

		n = devtab[c->type]->read(c, kaddr, BY2PG, daddr);
		if(n != BY2PG)
			faulterror(Eioload, c, 0);

		poperror();
		kunmap(k);
		qlock(&s->lk);

		/*
		 *  race, another proc may have gotten here first
		 *  (and the pager may have run on that page) while
		 *  s->lk was unlocked
		 */
		if(*p != loadrec){
			if(!pagedout(*p)){
				/* another process did it for me */
				putpage(new);
				goto done;
			} else {
				/* another process and the pager got in */
				putpage(new);
				goto retry;
			}
		}

		new->daddr = daddr;
		cachepage(new, &swapimage);
		*p = new;
		putswap(loadrec);
	}

done:
	if(s->flushme)
		memset((*p)->cachectl, PG_TXTFLUSH, sizeof((*p)->cachectl));
}

/*
 * Called only in a system call
 */
int
okaddr(ulong addr, ulong len, int write)
{
	Segment *s;

	if((long)len >= 0) {
		for(;;) {
			s = seg(up, addr, 0);
			if(s == 0 || (write && (s->type&SG_RONLY)))
				break;

			if(addr+len > s->top) {
				len -= s->top - addr;
				addr = s->top;
				continue;
			}
			return 1;
		}
	}
	pprint("suicide: invalid address 0x%lux in sys call pc=0x%lux\n", addr, userpc());
	return 0;
}

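/*
 * Like okaddr, but kill the process if the range is invalid.
 */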
void
validaddr(ulong addr, ulong len, int write)
{
	if(!okaddr(addr, len, write))
		pexit("Suicide", 0);
}

/*
 * &s[0] is known to be a valid address.
 */
void*
vmemchr(void *s, int c, int n)
{
	int m;
	ulong a;
	void *t;

	a = (ulong)s;
	while(PGROUND(a) != PGROUND(a+n-1)){
		/* spans pages; handle this page */
		m = BY2PG - (a & (BY2PG-1));
		t = memchr((void*)a, c, m);
		if(t)
			return t;
		a += m;
		n -= m;
		if((a & KZERO) != KZERO)
			validaddr(a, 1, 0);
	}

	/* fits in one page */
	return memchr((void*)a, c, n);
}

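/*
 * Find the segment of p containing addr.  If dolock is set, return
 * with the segment's lk held, re-checking the bounds after the lock
 * is acquired in case the segment changed while we waited.
 */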
Segment*
seg(Proc *p, ulong addr, int dolock)
{
	Segment **s, **et, *n;

	et = &p->seg[NSEG];
	for(s = p->seg; s < et; s++) {
		n = *s;
		if(n == 0)
			continue;
		if(addr >= n->base && addr < n->top) {
			if(dolock == 0)
				return n;

			qlock(&n->lk);
			if(addr >= n->base && addr < n->top)
				return n;
			qunlock(&n->lk);
		}
	}

	return 0;
}
384