xref: /inferno-os/os/boot/pc/memory.c (revision 7ef44d652ae9e5e1f5b3465d73684e4a54de73c0)
1 /*
2  * Size memory and create the kernel page-tables on the fly while doing so.
3  * Called from main(), this code should only be run by the bootstrap processor.
4  */
5 #include "u.h"
6 #include "lib.h"
7 #include "mem.h"
8 #include "dat.h"
9 #include "fns.h"
10 #include "io.h"
11 
#define MEMDEBUG	0	/* set non-zero to enable memdebug() map dumps */

/* x86 2-level paging: page-directory and page-table indices of a virtual address */
#define PDX(va)		((((ulong)(va))>>22) & 0x03FF)	/* bits 31-22: directory index */
#define PTX(va)		((((ulong)(va))>>12) & 0x03FF)	/* bits 21-12: table index */
16 
enum {
	MemUPA		= 0,		/* unbacked physical address */
	MemRAM		= 1,		/* physical memory */
	MemUMB		= 2,		/* upper memory block (<16MB) */
	NMemType	= 3,

	KB		= 1024,

	MemMinMB	= 4,		/* minimum physical memory (<=4MB) */
	MemMaxMB	= 768,		/* maximum physical memory to check */

	NMemBase	= 10,		/* not referenced in this file; presumably sized for a BIOS base-memory table elsewhere */
};

/*
 * One free extent: size bytes starting at physical address addr.
 * An entry with size == 0 terminates a map.
 */
typedef struct {
	int	size;
	ulong	addr;
} Map;

/*
 * A named, lock-protected resource map: an array of Map entries
 * kept sorted by addr, with mapend pointing at the last slot.
 */
typedef struct {
	char*	name;		/* for diagnostics only */
	Map*	map;		/* first entry */
	Map*	mapend;		/* last slot of the backing array */

	Lock;
} RMap;
43 
/*
 * Statically allocated resource maps. Each mapend points at the final
 * slot of its array; mapfree() refuses to write at or beyond mapend
 * (reporting a loss instead), which preserves a zero-size entry as the
 * map terminator.
 */
static Map mapupa[8];
static RMap rmapupa = {
	"unallocated unbacked physical memory",
	mapupa,
	&mapupa[7],
};

static Map xmapupa[8];
static RMap xrmapupa = {
	"unbacked physical memory",
	xmapupa,
	&xmapupa[7],
};

static Map mapram[8];
static RMap rmapram = {
	"physical memory",
	mapram,
	&mapram[7],
};

static Map mapumb[64];
static RMap rmapumb = {
	"upper memory block",
	mapumb,
	&mapumb[63],
};

static Map mapumbrw[8];
static RMap rmapumbrw = {
	"UMB device memory",
	mapumbrw,
	&mapumbrw[7],
};
78 
79 void
80 memdebug(void)
81 {
82 	Map *mp;
83 	ulong maxpa, maxpa1, maxpa2;
84 
85 	if(MEMDEBUG == 0)
86 		return;
87 
88 	maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
89 	maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
90 	maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
91 	print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
92 		maxpa, MB+maxpa*KB, maxpa1, maxpa2);
93 
94 	for(mp = rmapram.map; mp->size; mp++)
95 		print("%8.8luX %8.8luX %8.8luX\n", mp->addr, (ulong)mp->size, mp->addr+mp->size);
96 	for(mp = rmapumb.map; mp->size; mp++)
97 		print("%8.8luX %8.8luX %8.8luX\n", mp->addr, (ulong)mp->size, mp->addr+mp->size);
98 	for(mp = rmapumbrw.map; mp->size; mp++)
99 		print("%8.8luX %8.8luX %8.8luX\n", mp->addr, (ulong)mp->size, mp->addr+mp->size);
100 	for(mp = rmapupa.map; mp->size; mp++)
101 		print("%8.8luX %8.8luX %8.8luX\n", mp->addr, (ulong)mp->size, mp->addr+mp->size);
102 }
103 
/*
 * Return the extent [addr, addr+size) to rmap, keeping the map sorted
 * by address and coalescing with adjacent free extents where possible.
 * If the map is full the extent is dropped with a console complaint.
 */
void
mapfree(RMap* rmap, ulong addr, ulong size)
{
	Map *mp;
	ulong t;

	if(size == 0)
		return;

	lock(rmap);
	/* find the first entry whose address is above addr (or the terminator) */
	for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
		;

	if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
		/* abuts the previous entry: extend it */
		(mp-1)->size += size;
		if(addr+size == mp->addr){
			/* also abuts the next entry: merge and close the gap */
			(mp-1)->size += mp->size;
			while(mp->size){
				mp++;
				(mp-1)->addr = mp->addr;
				(mp-1)->size = mp->size;
			}
		}
	}
	else{
		if(addr+size == mp->addr && mp->size){
			/* abuts only the next entry: grow it downwards */
			mp->addr -= size;
			mp->size += size;
		}
		else do{
			/*
			 * Insert a new entry at mp, rippling the displaced
			 * entries down the array one slot at a time.
			 */
			if(mp >= rmap->mapend){
				print("mapfree: %s: losing 0x%luX, %lud\n",
					rmap->name, addr, size);
				break;
			}
			t = mp->addr;
			mp->addr = addr;
			addr = t;
			t = mp->size;
			mp->size = size;
			mp++;
		}while(size = t);	/* assignment: loop until a zero (terminator) size was displaced */
	}
	unlock(rmap);
}
149 
/*
 * Allocate size bytes from rmap. If addr is non-zero the request is for
 * that exact address; otherwise the first entry that can hold size bytes
 * (rounded up to align when align > 0) is used. Returns the allocated
 * physical address, or 0 on failure. Any gap left at the front of the
 * chosen entry by the alignment round-up is given back via mapfree().
 */
ulong
mapalloc(RMap* rmap, ulong addr, int size, int align)
{
	Map *mp;
	ulong maddr, oaddr;

	lock(rmap);
	for(mp = rmap->map; mp->size; mp++){
		maddr = mp->addr;

		if(addr){
			/*
			 * A specific address range has been given:
			 *   if the current map entry is greater then
			 *   the address is not in the map;
			 *   if the current map entry does not overlap
			 *   the beginning of the requested range then
			 *   continue on to the next map entry;
			 *   if the current map entry does not entirely
			 *   contain the requested range then the range
			 *   is not in the map.
			 */
			if(maddr > addr)
				break;
			if(mp->size < addr - maddr)	/* maddr+mp->size < addr, but no overflow */
				continue;
			if(addr - maddr > mp->size - size)	/* addr+size > maddr+mp->size, but no overflow */
				break;
			maddr = addr;
		}

		if(align > 0)
			maddr = ((maddr+align-1)/align)*align;	/* round up to alignment within the entry */
		if(mp->addr+mp->size-maddr < size)
			continue;

		/* take [maddr, maddr+size) out of the entry, keeping the tail */
		oaddr = mp->addr;
		mp->addr = maddr+size;
		mp->size -= maddr-oaddr+size;
		if(mp->size == 0){
			/* entry now empty: shift the rest of the map up a slot */
			do{
				mp++;
				(mp-1)->addr = mp->addr;
			}while((mp-1)->size = mp->size);	/* assignment: stop after copying the terminator */
		}

		unlock(rmap);
		if(oaddr != maddr)
			mapfree(rmap, oaddr, maddr-oaddr);	/* return the alignment gap */

		return maddr;
	}
	unlock(rmap);

	return 0;
}
206 
/*
 * Probe the upper memory area and populate rmapumb (address space free
 * for device mappings) and rmapumbrw (actual read/write memory found
 * there). The probes write to the hardware, so the exact access order
 * below is significant.
 */
static void
umbscan(void)
{
	uchar *p;

	/*
	 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
	 * which aren't used; they can be used later for devices which
	 * want to allocate some virtual address space.
	 * Check for two things:
	 * 1) device BIOS ROM. This should start with a two-byte header
	 *    of 0x55 0xAA, followed by a byte giving the size of the ROM
	 *    in 512-byte chunks. These ROM's must start on a 2KB boundary.
	 * 2) device memory. This is read-write.
	 * There are some assumptions: there's VGA memory at 0xA0000 and
	 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
	 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
	 * for grabs; check anyway.
	 */
	p = KADDR(0xD0000);	/*RSC: changed from 0xC0000 */
	while(p < (uchar*)KADDR(0xE0000)){
		if (p[0] == 0x55 && p[1] == 0xAA) {
			/* Skip p[2] chunks of 512 bytes.  Test for 0x55 AA before
			     poking obtrusively, or else the Thinkpad X20 dies when
			     setting up the cardbus (PB) */
			p += p[2] * 512;
			continue;
		}

		/* write probe: if both bytes stick, this 2KB chunk is r/w memory */
		p[0] = 0xCC;
		p[2*KB-1] = 0xCC;
		if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
			/* not r/w; try planting a ROM signature */
			p[0] = 0x55;
			p[1] = 0xAA;
			p[2] = 4;
			if(p[0] == 0x55 && p[1] == 0xAA){
				/* signature stuck: skip it like a real ROM */
				p += p[2]*512;
				continue;
			}
			if(p[0] == 0xFF && p[1] == 0xFF)
				/* reads as all-ones: nothing here, usable address space */
				mapfree(&rmapumb, PADDR(p), 2*KB);
		}
		else
			mapfree(&rmapumbrw, PADDR(p), 2*KB);
		p += 2*KB;
	}

	/* the 0xE0000-0xF0000 64KB block: free it if there's no ROM signature */
	p = KADDR(0xE0000);
	if(p[0] != 0x55 || p[1] != 0xAA){
		p[0] = 0xCC;
		p[64*KB-1] = 0xCC;
		/*
		 * NOTE(review): && frees the block only when NEITHER probe byte
		 * is writable (i.e. looks unbacked); the 2KB loop above uses ||
		 * in its corresponding test. Looks intentional in the original
		 * source, but worth confirming.
		 */
		if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
			mapfree(&rmapumb, PADDR(p), 64*KB);
	}
}
262 
263 
264 void
265 meminit(ulong)
266 {
267 	/* A hack to initialize unbacked physical memory.  It's assumed PCI space is assigned by
268 	     the BIOS in the 0xF0000000 range and 9load never needs more than 0x2000... to run. These
269 	     values leave ample space for memory allocations for uninitialized PCI cards (e.g. cardbus
270 	     cards).  (pb) */
271 	ulong maxmem = 0x40000000;
272 
273 	umbscan();
274 	mapfree(&rmapupa, maxmem, 0x00000000-maxmem);
275 	if(MEMDEBUG)
276 		memdebug();
277 }
278 
279 ulong
280 umbmalloc(ulong addr, int size, int align)
281 {
282 	ulong a;
283 
284 	if(a = mapalloc(&rmapumb, addr, size, align))
285 		return (ulong)KADDR(a);
286 
287 	return 0;
288 }
289 
290 void
291 umbfree(ulong addr, int size)
292 {
293 	mapfree(&rmapumb, PADDR(addr), size);
294 }
295 
/*
 * Allocate size bytes of read/write UMB memory, preferring space already
 * known to be r/w; returns a VIRTUAL (kernel) address, or 0 on failure.
 * The fallback write-probes the memory, so access order matters.
 */
ulong
umbrwmalloc(ulong addr, int size, int align)
{
	ulong a;
	uchar *p;

	if(a = mapalloc(&rmapumbrw, addr, size, align))
		return(ulong)KADDR(a);

	/*
	 * Perhaps the memory wasn't visible before
	 * the interface is initialised, so try again.
	 */
	if((a = umbmalloc(addr, size, align)) == 0)
		return 0;
	p = (uchar*)a;	/* umbmalloc returned a virtual address */
	p[0] = 0xCC;
	p[size-1] = 0xCC;
	if(p[0] == 0xCC && p[size-1] == 0xCC)
		return a;	/* probe bytes stuck: really is r/w memory */
	umbfree(a, size);

	return 0;
}
320 
321 void
322 umbrwfree(ulong addr, int size)
323 {
324 	mapfree(&rmapumbrw, PADDR(addr), size);
325 }
326 
327 ulong*
328 mmuwalk(ulong* pdb, ulong va, int level, int create)
329 {
330 	ulong pa, *table;
331 
332 	/*
333 	 * Walk the page-table pointed to by pdb and return a pointer
334 	 * to the entry for virtual address va at the requested level.
335 	 * If the entry is invalid and create isn't requested then bail
336 	 * out early. Otherwise, for the 2nd level walk, allocate a new
337 	 * page-table page and register it in the 1st level.
338 	 */
339 	table = &pdb[PDX(va)];
340 	if(!(*table & PTEVALID) && create == 0)
341 		return 0;
342 
343 	switch(level){
344 
345 	default:
346 		return 0;
347 
348 	case 1:
349 		return table;
350 
351 	case 2:
352 		if(*table & PTESIZE)
353 			panic("mmuwalk2: va 0x%ux entry 0x%ux\n", va, *table);
354 		if(!(*table & PTEVALID)){
355 			pa = PADDR(ialloc(BY2PG, BY2PG));
356 			*table = pa|PTEWRITE|PTEVALID;
357 		}
358 		table = KADDR(PPN(*table));
359 
360 		return &table[PTX(va)];
361 	}
362 }
363 
static Lock mmukmaplock;	/* serialises page-table updates in mmukmap() */
365 
/*
 * Map physical [pa, pa+size) at virtual va (va == 0 means use the
 * kernel KADDR mapping of pa), using 4MB pages when the processor
 * supports PSE and the range is suitably aligned and sized. Existing
 * mappings must match pa or the kernel panics. Returns the final value
 * of pa (the physical address just past the mapped range); reloads CR3
 * if any page-table entry was added or confirmed at the second level.
 */
ulong
mmukmap(ulong pa, ulong va, int size)
{
	ulong pae, *table, *pdb, pgsz, *pte, x;
	int pse, sync;
	extern int cpuidax, cpuiddx;

	pdb = KADDR(getcr3());
	/* CPUID EDX bit 3 = PSE feature, CR4 bit 4 = PSE enabled */
	if((cpuiddx & 0x08) && (getcr4() & 0x10))
		pse = 1;
	else
		pse = 0;
	sync = 0;

	pa = PPN(pa);
	if(va == 0)
		va = (ulong)KADDR(pa);
	else
		va = PPN(va);

	pae = pa + size;
	lock(&mmukmaplock);
	while(pa < pae){
		table = &pdb[PDX(va)];
		/*
		 * Possibly already mapped.
		 */
		if(*table & PTEVALID){
			if(*table & PTESIZE){
				/*
				 * Big page. Does it fit within?
				 * If it does, adjust pgsz so the correct end can be
				 * returned and get out.
				 * If not, adjust pgsz up to the next 4MB boundary
				 * and continue.
				 */
				x = PPN(*table);
				/* NOTE(review): %ux with ulong args — %lux would be exact; same size on 386 */
				if(x != pa)
					panic("mmukmap1: pa 0x%ux  entry 0x%ux\n",
						pa, *table);
				x += 4*MB;
				if(pae <= x){
					pa = pae;
					break;
				}
				pgsz = x - pa;
				pa += pgsz;
				va += pgsz;

				continue;
			}
			else{
				/*
				 * Little page. Walk to the entry.
				 * If the entry is valid, set pgsz and continue.
				 * If not, make it so, set pgsz, sync and continue.
				 */
				pte = mmuwalk(pdb, va, 2, 0);
				if(pte && *pte & PTEVALID){
					x = PPN(*pte);
					if(x != pa)
						panic("mmukmap2: pa 0x%ux entry 0x%ux\n",
							pa, *pte);
					pgsz = BY2PG;
					pa += pgsz;
					va += pgsz;
					sync++;

					continue;
				}
			}
		}

		/*
		 * Not mapped. Check if it can be mapped using a big page -
		 * starts on a 4MB boundary, size >= 4MB and processor can do it.
		 * If not a big page, walk the walk, talk the talk.
		 * Sync is set.
		 */
		if(pse && (pa % (4*MB)) == 0 && (pae >= pa+4*MB)){
			*table = pa|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = 4*MB;
		}
		else{
			pte = mmuwalk(pdb, va, 2, 1);
			*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = BY2PG;
		}
		pa += pgsz;
		va += pgsz;
		sync++;
	}
	unlock(&mmukmaplock);

	/*
	 * If something was added
	 * then need to sync up.
	 */
	if(sync)
		putcr3(PADDR(pdb));	/* reload CR3 to flush stale TLB entries */

	return pa;
}
469 
470 ulong
471 upamalloc(ulong addr, int size, int align)
472 {
473 	ulong ae, a;
474 
475 	USED(align);
476 
477 	if((a = mapalloc(&rmapupa, addr, size, align)) == 0){
478 		memdebug();
479 		return 0;
480 	}
481 
482 	/*
483 	 * This is a travesty, but they all are.
484 	 */
485 	ae = mmukmap(a, 0, size);
486 
487 	/*
488 	 * Should check here that it was all delivered
489 	 * and put it back and barf if not.
490 	 */
491 	USED(ae);
492 
493 	/*
494 	 * Be very careful this returns a PHYSICAL address.
495 	 */
496 	return a;
497 }
498 
499 void
500 upafree(ulong pa, int size)
501 {
502 	USED(pa, size);
503 }
504 
505