xref: /inferno-os/os/pc/memory.c (revision 043f83732c06a092cd12b5ad4f92264dee44c61a)
1 /*
2  * Size memory and create the kernel page-tables on the fly while doing so.
3  * Called from main(), this code should only be run by the bootstrap processor.
4  */
5 #include "u.h"
6 #include "../port/lib.h"
7 #include "mem.h"
8 #include "dat.h"
9 #include "fns.h"
10 #include "io.h"
11 
#define MEMDEBUG	0

enum {
	MemUPA		= 0,		/* unbacked physical address */
	MemRAM		= 1,		/* physical memory */
	MemUMB		= 2,		/* upper memory block (<16MB) */
	NMemType	= 3,

	KB		= 1024,

	MemMinMB	= 4,		/* minimum physical memory (<=4MB) */
	MemMaxMB	= 768,		/* maximum physical memory to check */

	NMemBase	= 10,
};

/* One contiguous extent; a size of 0 terminates a map array. */
typedef struct {
	int	size;
	ulong	addr;
} Map;

/* A named, locked resource map: fixed array of Maps kept sorted by addr. */
typedef struct {
	char*	name;
	Map*	map;		/* first entry */
	Map*	mapend;		/* last usable slot (reserved for the 0 sentinel) */

	Lock;
} RMap;

/* Free pools managed by mapalloc()/mapfree() below. */
static Map mapupa[16];
static RMap rmapupa = {
	"unallocated unbacked physical memory",
	mapupa,
	&mapupa[nelem(mapupa)-1],
};

static Map xmapupa[16];
static RMap xrmapupa = {
	"unbacked physical memory",
	xmapupa,
	&xmapupa[nelem(xmapupa)-1],
};

static Map mapram[16];
static RMap rmapram = {
	"physical memory",
	mapram,
	&mapram[nelem(mapram)-1],
};

static Map mapumb[64];
static RMap rmapumb = {
	"upper memory block",
	mapumb,
	&mapumb[nelem(mapumb)-1],
};

static Map mapumbrw[16];
static RMap rmapumbrw = {
	"UMB device memory",
	mapumbrw,
	&mapumbrw[nelem(mapumbrw)-1],
};
75 
76 void
77 mapprint(RMap *rmap)
78 {
79 	Map *mp;
80 
81 	print("%s\n", rmap->name);
82 	for(mp = rmap->map; mp->size; mp++)
83 		print("\t%8.8luX %8.8uX %8.8luX\n", mp->addr, mp->size, mp->addr+mp->size);
84 }
85 
86 void
87 memdebug(void)
88 {
89 	ulong maxpa, maxpa1, maxpa2;
90 
91 	if(MEMDEBUG == 0)
92 		return;
93 
94 	maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
95 	maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
96 	maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
97 	print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
98 		maxpa, MB+maxpa*KB, maxpa1, maxpa2);
99 
100 	mapprint(&rmapram);
101 	mapprint(&rmapumb);
102 	mapprint(&rmapumbrw);
103 	mapprint(&rmapupa);
104 }
105 
/*
 * Return the range [addr, addr+size) to rmap, coalescing with
 * adjacent entries where possible.  The map array is kept sorted
 * by addr with a zero-size entry as terminator; if the array is
 * full the range is dropped with a console complaint.
 */
void
mapfree(RMap* rmap, ulong addr, ulong size)
{
	Map *mp;
	ulong t;

	if(size <= 0)		/* size is ulong, so this only rejects size == 0 */
		return;

	lock(rmap);
	/* find the first entry whose addr is above the freed range */
	for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
		;

	if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
		/* abuts the end of the previous entry: extend it... */
		(mp-1)->size += size;
		if(addr+size == mp->addr){
			/* ...and now also abuts the next: merge, shuffle the rest down */
			(mp-1)->size += mp->size;
			while(mp->size){
				mp++;
				(mp-1)->addr = mp->addr;
				(mp-1)->size = mp->size;
			}
		}
	}
	else{
		if(addr+size == mp->addr && mp->size){
			/* abuts the start of the next entry: extend it downwards */
			mp->addr -= size;
			mp->size += size;
		}
		else do{
			/* insert here, rippling the remaining entries up one slot */
			if(mp >= rmap->mapend){
				print("mapfree: %s: losing 0x%luX, %ld\n",
					rmap->name, addr, size);
				break;
			}
			t = mp->addr;
			mp->addr = addr;
			addr = t;
			t = mp->size;
			mp->size = size;
			mp++;
		}while(size = t);	/* assignment intended: stop once the 0 sentinel is moved */
	}
	unlock(rmap);
}
151 
/*
 * Allocate size bytes from rmap.  If addr is non-zero the allocation
 * must start exactly there; otherwise the first entry that fits is
 * used.  align > 0 rounds the chosen base up to a multiple of align.
 * Returns the allocated address, or 0 on failure.
 */
ulong
mapalloc(RMap* rmap, ulong addr, int size, int align)
{
	Map *mp;
	ulong maddr, oaddr;

	lock(rmap);
	for(mp = rmap->map; mp->size; mp++){
		maddr = mp->addr;

		if(addr){
			/*
			 * A specific address range has been given:
			 *   if the current map entry is greater then
			 *   the address is not in the map;
			 *   if the current map entry does not overlap
			 *   the beginning of the requested range then
			 *   continue on to the next map entry;
			 *   if the current map entry does not entirely
			 *   contain the requested range then the range
			 *   is not in the map.
			 */
			if(maddr > addr)
				break;
			if(mp->size < addr - maddr)	/* maddr+mp->size < addr, but no overflow */
				continue;
			if(addr - maddr > mp->size - size)	/* addr+size > maddr+mp->size, but no overflow */
				break;
			maddr = addr;
		}

		if(align > 0)
			maddr = ((maddr+align-1)/align)*align;
		if(mp->addr+mp->size-maddr < size)
			continue;

		/* take [maddr, maddr+size) out of this entry */
		oaddr = mp->addr;
		mp->addr = maddr+size;
		mp->size -= maddr-oaddr+size;
		if(mp->size == 0){
			/* entry exhausted: shuffle the rest of the array down */
			do{
				mp++;
				(mp-1)->addr = mp->addr;
			}while((mp-1)->size = mp->size);	/* assignment: copy through the 0 sentinel */
		}

		unlock(rmap);
		/* return any space skipped for alignment/specific addr to the pool */
		if(oaddr != maddr)
			mapfree(rmap, oaddr, maddr-oaddr);

		return maddr;
	}
	unlock(rmap);

	return 0;
}
208 
static void
umbscan(void)
{
	uchar *p;

	/*
	 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
	 * which aren't used; they can be used later for devices which
	 * want to allocate some virtual address space.
	 * Check for two things:
	 * 1) device BIOS ROM. This should start with a two-byte header
	 *    of 0x55 0xAA, followed by a byte giving the size of the ROM
	 *    in 512-byte chunks. These ROM's must start on a 2KB boundary.
	 * 2) device memory. This is read-write.
	 * There are some assumptions: there's VGA memory at 0xA0000 and
	 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
	 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
	 * for grabs; check anyway.
	 */
	p = KADDR(0xD0000);
	while(p < (uchar*)KADDR(0xE0000)){
		/*
		 * Test for 0x55 0xAA before poking obtrusively,
		 * some machines (e.g. Thinkpad X20) seem to map
		 * something dynamic here (cardbus?) causing weird
		 * problems if it is changed.
		 */
		if(p[0] == 0x55 && p[1] == 0xAA){
			/* ROM found: skip its full extent (p[2] 512-byte chunks) */
			p += p[2]*512;
			continue;
		}

		/* probe both ends of the 2KB chunk for writability */
		p[0] = 0xCC;
		p[2*KB-1] = 0xCC;
		if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
			/* not plain RAM: retry the ROM signature check after a write */
			p[0] = 0x55;
			p[1] = 0xAA;
			p[2] = 4;
			if(p[0] == 0x55 && p[1] == 0xAA){
				p += p[2]*512;
				continue;
			}
			/* reads back as all-ones: unbacked, usable as UMB space */
			if(p[0] == 0xFF && p[1] == 0xFF)
				mapfree(&rmapumb, PADDR(p), 2*KB);
		}
		else
			/* writable: read-write device memory */
			mapfree(&rmapumbrw, PADDR(p), 2*KB);
		p += 2*KB;
	}

	/* the 64KB at 0xE0000: free it if there is no ROM and writes don't stick */
	p = KADDR(0xE0000);
	if(p[0] != 0x55 || p[1] != 0xAA){
		p[0] = 0xCC;
		p[64*KB-1] = 0xCC;
		if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
			mapfree(&rmapumb, PADDR(p), 64*KB);
	}
}
267 
268 
/*
 * Size memory above MemMinMB by probing a megabyte at a time up to
 * maxmem (or a CMOS-derived limit when maxmem is 0), building the
 * kernel page tables on the fly and handing each chunk to the RAM,
 * UMB or UPA pool as appropriate.
 */
static void
ramscan(ulong maxmem)
{
	ulong *k0, kzero, map, maxpa, pa, *pte, *table, *va, x, n;
	int nvalid[NMemType];
	uchar *bda;

	/*
	 * The bootstrap code has created a prototype page
	 * table which maps the first MemMinMB of physical memory to KZERO.
	 * The page directory is at m->pdb and the first page of
	 * free memory is after the per-processor MMU information.
	 */
	/*
	 * Initialise the memory bank information for conventional memory
	 * (i.e. less than 640KB). The base is the first location after the
	 * bootstrap processor MMU information and the limit is obtained from
	 * the BIOS data area.
	 */
	x = PADDR(CPU0MACH+BY2PG);
	bda = (uchar*)KADDR(0x400);
	/* BDA word at offset 0x13 is the base-memory size in KB */
	n = ((bda[0x14]<<8)|bda[0x13])*KB-x;
	mapfree(&rmapram, x, n);
//	memset(KADDR(x), 0, n);			/* keep us honest */

	/* the gap between the end of the kernel image and MemMinMB is free */
	x = PADDR(PGROUND((ulong)end));
	pa = MemMinMB*MB;
	mapfree(&rmapram, x, pa-x);
//	memset(KADDR(x), 0, pa-x);		/* keep us honest */

	/*
	 * Check if the extended memory size can be obtained from the CMOS.
	 * If it's 0 then it's either not known or >= 64MB. Always check
	 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
	 * in this case the memory from the gap is remapped to the top of
	 * memory.
	 * The value in CMOS is supposed to be the number of KB above 1MB.
	 */
	if(maxmem == 0){
		x = (nvramread(0x18)<<8)|nvramread(0x17);
		if(x == 0 || x >= (63*KB))
			maxpa = MemMaxMB*MB;
		else
			maxpa = MB+x*KB;
		if(maxpa < 24*MB)
			maxpa = 24*MB;
		maxmem = MemMaxMB*MB;
	}
	else
		maxpa = maxmem;

	/*
	 * March up memory from MemMinMB to maxpa 1MB at a time,
	 * mapping the first page and checking the page can
	 * be written and read correctly. The page tables are created here
	 * on the fly, allocating from low memory as necessary.
	 */
	k0 = (ulong*)KADDR(0);
	kzero = *k0;		/* preserved; restored at the end */
	map = 0;		/* page allocated for the current page table, if any */
	x = 0x12345678;		/* test pattern, advanced every MB */
	memset(nvalid, 0, sizeof(nvalid));
	while(pa < maxpa){
		/*
		 * Map the page. Use mapalloc(&rmapram, ...) to make
		 * the page table if necessary, it will be returned to the
		 * pool later if it isn't needed.
		 */
		va = KADDR(pa);
		table = &m->pdb[PDX(va)];
		if(*table == 0){
			if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
				break;
			memset(KADDR(map), 0, BY2PG);
			*table = map|PTEWRITE|PTEVALID;
			memset(nvalid, 0, sizeof(nvalid));
		}
		table = KADDR(PPN(*table));
		pte = &table[PTX(va)];

		/* map the probe page uncached so the write really reaches memory */
		*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
		mmuflushtlb(PADDR(m->pdb));

		/*
		 * Write a pattern to the page and write a different
		 * pattern to a possible mirror at KZERO. If the data
		 * reads back correctly the chunk is some type of RAM (possibly
		 * a linearly-mapped VGA framebuffer, for instance...) and
		 * can be cleared and added to the memory pool. If not, the
		 * chunk is marked uncached and added to the UMB pool if <16MB
		 * or is marked invalid and added to the UPA pool.
		 */
		*va = x;
		*k0 = ~x;
		if(*va == x){
			nvalid[MemRAM] += MB/BY2PG;
			mapfree(&rmapram, pa, MB);

			/* map the rest of the megabyte cached */
			do{
				*pte++ = pa|PTEWRITE|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
			mmuflushtlb(PADDR(m->pdb));
			/* memset(va, 0, MB); so damn slow to memset all of memory */
		}
		else if(pa < 16*MB){
			nvalid[MemUMB] += MB/BY2PG;
			mapfree(&rmapumb, pa, MB);

			do{
				*pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
		}
		else{
			nvalid[MemUPA] += MB/BY2PG;
			mapfree(&rmapupa, pa, MB);

			*pte = 0;
			pa += MB;
		}

		/*
		 * Done with this 4MB chunk, review the options:
		 * 1) not physical memory and >=16MB - invalidate the PDB entry;
		 * 2) physical memory - use the 4MB page extension if possible;
		 * 3) not physical memory and <16MB - use the 4MB page extension
		 *    if possible;
		 * 4) mixed or no 4MB page extension - commit the already
		 *    initialised space for the page table.
		 */
		if((pa % (4*MB)) == 0){
			table = &m->pdb[PDX(va)];
			if(nvalid[MemUPA] == (4*MB)/BY2PG)
				*table = 0;
			else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
			else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			else
				map = 0;	/* page table is in use; allocate a new one next time */
		}

		mmuflushtlb(PADDR(m->pdb));
		x += 0x3141526;
	}

	/*
	 * If we didn't reach the end of the 4MB chunk, that part won't
	 * be mapped.  Commit the already initialised space for the page table.
	 */
	if(pa % (4*MB))
		map = 0;

	if(map)
		mapfree(&rmapram, map, BY2PG);
	if(pa < maxmem)
		mapfree(&rmapupa, pa, maxmem-pa);
	if(maxmem < 0xFFE00000)
		mapfree(&rmapupa, maxmem, 0x00000000-maxmem);	/* wraps: 4GB-maxmem */
	if(MEMDEBUG)
		print("maxmem %luX %luX\n", maxmem, 0x00000000-maxmem);
	*k0 = kzero;
}
433 
434 void
435 meminit(ulong maxmem)
436 {
437 	Map *mp, *xmp;
438 	ulong pa, *pte;
439 
440 	/*
441 	 * Set special attributes for memory between 640KB and 1MB:
442 	 *   VGA memory is writethrough;
443 	 *   BIOS ROM's/UMB's are uncached;
444 	 * then scan for useful memory.
445 	 */
446 	for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
447 		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
448 		*pte |= PTEWT;
449 	}
450 	for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
451 		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
452 		*pte |= PTEUNCACHED;
453 	}
454 	mmuflushtlb(PADDR(m->pdb));
455 
456 	umbscan();
457 	ramscan(maxmem);
458 
459 	/*
460 	 * Set the conf entries describing two banks of allocatable memory.
461 	 * Grab the first and largest entries in rmapram as left by ramscan().
462 	 *
463 	 * It would be nice to have more than 2 memory banks describable in conf.
464 	 */
465 	mp = rmapram.map;
466 	conf.base0 = mp->addr;
467 	conf.npage0 = mp->size/BY2PG;
468 	mp++;
469 	for(xmp = 0; mp->size; mp++){
470 		if(xmp == 0 || mp->size > xmp->size)
471 			xmp = mp;
472 	}
473 
474 	if(xmp){
475 		conf.base1 = xmp->addr;
476 		conf.npage1 = xmp->size/BY2PG;
477 	}
478 	if(MEMDEBUG)
479 		memdebug();
480 }
481 
482 ulong
483 umbmalloc(ulong addr, int size, int align)
484 {
485 	ulong a;
486 
487 	if(a = mapalloc(&rmapumb, addr, size, align))
488 		return (ulong)KADDR(a);
489 
490 	return 0;
491 }
492 
493 void
494 umbfree(ulong addr, int size)
495 {
496 	mapfree(&rmapumb, PADDR(addr), size);
497 }
498 
499 ulong
500 umbrwmalloc(ulong addr, int size, int align)
501 {
502 	ulong a;
503 	uchar *p;
504 
505 	if(a = mapalloc(&rmapumbrw, addr, size, align))
506 		return(ulong)KADDR(a);
507 
508 	/*
509 	 * Perhaps the memory wasn't visible before
510 	 * the interface is initialised, so try again.
511 	 */
512 	if((a = umbmalloc(addr, size, align)) == 0)
513 		return 0;
514 	p = (uchar*)a;
515 	p[0] = 0xCC;
516 	p[size-1] = 0xCC;
517 	if(p[0] == 0xCC && p[size-1] == 0xCC)
518 		return a;
519 	umbfree(a, size);
520 
521 	return 0;
522 }
523 
524 void
525 umbrwfree(ulong addr, int size)
526 {
527 	mapfree(&rmapumbrw, PADDR(addr), size);
528 }
529 
530 ulong
531 upamalloc(ulong pa, int size, int align)
532 {
533 	ulong a, ae;
534 
535 	if(a = mapalloc(&xrmapupa, pa, size, align))
536 		return a;
537 
538 	if((a = mapalloc(&rmapupa, pa, size, align)) == 0){
539 		memdebug();
540 		return 0;
541 	}
542 
543 	/*
544 	 * Upamalloc is a request to map a range of physical addresses.
545 	 * Therefore, if pa is 0 mapalloc will choose the base address.
546 	 * Note, however, mmukmap is always asked to give a 1-to-1 mapping
547 	 * of va to pa.
548 	 */
549 	ae = mmukmap(a, a, size);
550 
551 	/*
552 	 * Should check here that it was all delivered
553 	 * and put it back and barf if not.
554 	 */
555 	USED(ae);
556 
557 	/*
558 	 * Be very careful this returns a PHYSICAL address
559 	 * mapped 1-to-1 with the virtual address.
560 	 * If a < KZERO it's probably not a good idea to
561 	 * try KADDR(a)...
562 	 */
563 	return a;
564 }
565 
566 void
567 upafree(ulong pa, int size)
568 {
569 	mapfree(&xrmapupa, pa, size);
570 }
571 
572 void
573 upareserve(ulong pa, int size)
574 {
575 	ulong a;
576 
577 	a = mapalloc(&rmapupa, pa, size, 0);
578 	if(a != pa){
579 		/*
580 		 * This can happen when we're using the E820
581 		 * map, which might have already reserved some
582 		 * of the regions claimed by the pci devices.
583 		 */
584 	//	print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
585 		if(a != 0)
586 			mapfree(&rmapupa, a, size);
587 	}
588 }
589