xref: /minix3/minix/kernel/arch/i386/pg_utils.c (revision dd41186aac5f9c05e657f127b7e5d33f375d1686)

#include <minix/cpufeature.h>

#include <assert.h>
#include "kernel/kernel.h"
#include "arch_proto.h"

#include <string.h>

/* These are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Retrieve the absolute values as something we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* Page directory we can use to map things. */
static u32_t pagedir[1024] __aligned(4096);

/* Print the boot-time memory map for debugging. */
void print_memmap(kinfo_t *cbi)
{
	int m;
	assert(cbi->mmap_size < MAXMEMMAP);
	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes addr = cbi->memmap[m].mm_base_addr,
			endit = cbi->memmap[m].mm_base_addr + cbi->memmap[m].mm_length;
		printf("%08lx-%08lx ", addr, endit);
	}
	printf("\nsize %d\n", cbi->mmap_size);
}

/* Remove the range [start, end) from the free memory map, splitting any
 * entry that only partially overlaps it.
 */
void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
	int m;
	phys_bytes o;

	/* Page-align the range: round start down and end up. */
	if((o=start % I386_PAGE_SIZE))
		start -= o;
	if((o=end % I386_PAGE_SIZE))
		end += I386_PAGE_SIZE - o;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes substart = start, subend = end;
		phys_bytes memaddr = cbi->memmap[m].mm_base_addr,
			memend = cbi->memmap[m].mm_base_addr + cbi->memmap[m].mm_length;

		/* adjust cut range to be a subset of the free memory */
		if(substart < memaddr) substart = memaddr;
		if(subend > memend) subend = memend;
		if(substart >= subend) continue;

		/* if there is any overlap, forget this one and add
		 * 1-2 subranges back
		 */
		cbi->memmap[m].mm_base_addr = cbi->memmap[m].mm_length = 0;
		if(substart > memaddr)
			add_memmap(cbi, memaddr, substart-memaddr);
		if(subend < memend)
			add_memmap(cbi, subend, memend-subend);
	}
}

/* Allocate the lowest suitably sized physical range we have and return
 * its base address.
 */
phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
{
	int m;
#define EMPTY 0xffffffff
	phys_bytes lowest = EMPTY;
	assert(len > 0);
	len = roundup(len, I386_PAGE_SIZE);

	assert(kernel_may_alloc);

	/* Find the lowest-based free range that is large enough. */
	for(m = 0; m < cbi->mmap_size; m++) {
		if(cbi->memmap[m].mm_length < len) continue;
		if(cbi->memmap[m].mm_base_addr < lowest)
			lowest = cbi->memmap[m].mm_base_addr;
	}
	assert(lowest != EMPTY);

	/* Take [lowest, lowest+len) out of the free map; cut_memmap()
	 * expects an end address, not a length.
	 */
	cut_memmap(cbi, lowest, lowest + len);
	cbi->kernel_allocated_bytes_dynamic += len;
	return lowest;
}

/* Add a free memory range to the memory map. */
void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
	int m;
#define LIMIT 0xFFFFF000
	/* Truncate available memory at 4GB as the rest of minix
	 * currently can't deal with any bigger.
	 */
	if(addr > LIMIT) return;
	if(addr + len > LIMIT) {
		len -= (addr + len - LIMIT);
	}
	assert(cbi->mmap_size < MAXMEMMAP);
	if(len == 0) return;
	addr = roundup(addr, I386_PAGE_SIZE);
	len = rounddown(len, I386_PAGE_SIZE);

	assert(kernel_may_alloc);

	/* Store the range in the first unused slot. */
	for(m = 0; m < MAXMEMMAP; m++) {
		phys_bytes highmark;
		if(cbi->memmap[m].mm_length) continue;
		cbi->memmap[m].mm_base_addr = addr;
		cbi->memmap[m].mm_length = len;
		cbi->memmap[m].mm_type = MULTIBOOT_MEMORY_AVAILABLE;
		if(m >= cbi->mmap_size)
			cbi->mmap_size = m+1;
		highmark = addr + len;
		if(highmark > cbi->mem_high_phys) {
			cbi->mem_high_phys = highmark;
		}

		return;
	}

	panic("no available memmap slot");
}
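
/*
 * Illustrative sketch (not part of the original file): how the memmap
 * helpers above are typically combined. Free RAM is registered with
 * add_memmap(), already-occupied ranges are taken back out with
 * cut_memmap(), and alloc_lowest() carves an aligned chunk out of the
 * lowest remaining range. The function name and the literal addresses
 * below are made-up example values, not MINIX code.
 */
#if 0
static void example_build_memmap(kinfo_t *cbi)
{
	/* Register a free RAM range reported by the boot loader. */
	add_memmap(cbi, 0x00100000ULL, 64ULL * 1024 * 1024);

	/* Remove the part that the kernel image already occupies. */
	cut_memmap(cbi, kern_phys_start, kern_phys_start + kern_kernlen);

	/* Grab one page-aligned page from the lowest remaining range. */
	(void)alloc_lowest(cbi, I386_PAGE_SIZE);
}
#endif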

/* Hand out one of the statically reserved boot page tables. The table's
 * virtual address is returned; its physical address is stored in *ph.
 */
u32_t *alloc_pagetable(phys_bytes *ph)
{
	u32_t *ret;
#define PG_PAGETABLES 6
	static u32_t pagetables[PG_PAGETABLES][1024] __aligned(4096);
	static int pt_inuse = 0;
	if(pt_inuse >= PG_PAGETABLES) panic("no more pagetables");
	assert(sizeof(pagetables[pt_inuse]) == I386_PAGE_SIZE);
	ret = pagetables[pt_inuse++];
	*ph = vir2phys(ret);
	return ret;
}

#define PAGE_KB (I386_PAGE_SIZE / 1024)

/* Allocate one physical page, taken from the end of the highest-numbered
 * free memory range.
 */
phys_bytes pg_alloc_page(kinfo_t *cbi)
{
	int m;
	multiboot_memory_map_t *mmap;

	assert(kernel_may_alloc);

	for(m = cbi->mmap_size-1; m >= 0; m--) {
		mmap = &cbi->memmap[m];
		if(!mmap->mm_length) continue;
		assert(mmap->mm_length > 0);
		assert(!(mmap->mm_length % I386_PAGE_SIZE));
		assert(!(mmap->mm_base_addr % I386_PAGE_SIZE));

		/* Shrink the range by one page and hand out that page. */
		mmap->mm_length -= I386_PAGE_SIZE;

		cbi->kernel_allocated_bytes_dynamic += I386_PAGE_SIZE;

		return mmap->mm_base_addr + mmap->mm_length;
	}

	panic("can't find free memory");
}

/* Fill the boot page directory with an identity mapping of the entire
 * 4GB address space, using 4MB pages.
 */
void pg_identity(kinfo_t *cbi)
{
	uint32_t i;
	phys_bytes phys;

	/* We map memory that does not correspond to physical memory
	 * as non-cacheable, so make sure we know where physical memory
	 * ends.
	 */
	assert(cbi->mem_high_phys);

	/* Set up an identity mapping page directory */
	for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
		u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE
			| I386_VM_USER
			| I386_VM_WRITE;
		phys = i * I386_BIG_PAGE_SIZE;
		if((cbi->mem_high_phys & I386_VM_ADDR_MASK_4MB)
			<= (phys & I386_VM_ADDR_MASK_4MB)) {
			flags |= I386_VM_PWT | I386_VM_PCD;
		}
		pagedir[i] = phys | flags;
	}
}
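
/*
 * Worked example of the identity mapping above, assuming the usual values
 * I386_VM_DIR_ENTRIES == 1024 and I386_BIG_PAGE_SIZE == 4MB (these
 * constants come from the i386 headers, not from this file):
 *
 *	entry    0: maps virtual 0x00000000-0x003fffff to the same physical range
 *	entry    1: maps virtual 0x00400000-0x007fffff to the same physical range
 *	...
 *	entry 1023: maps virtual 0xffc00000-0xffffffff to the same physical range
 *
 * so all 1024 entries together cover the full 4GB address space, and any
 * entry whose 4MB frame lies at or above the frame containing
 * mem_high_phys additionally gets PWT|PCD, i.e. is mapped uncached.
 */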

/* Map the kernel into the boot page directory at its virtual address,
 * using 4MB pages. Returns the first page directory entry that is
 * still free.
 */
int pg_mapkernel(void)
{
	int pde;
	u32_t mapped = 0, kern_phys = kern_phys_start;

	assert(!(kern_vir_start % I386_BIG_PAGE_SIZE));
	assert(!(kern_phys % I386_BIG_PAGE_SIZE));
	pde = kern_vir_start / I386_BIG_PAGE_SIZE; /* start pde */
	while(mapped < kern_kernlen) {
		pagedir[pde] = kern_phys | I386_VM_PRESENT |
			I386_VM_BIGPAGE | I386_VM_WRITE;
		mapped += I386_BIG_PAGE_SIZE;
		kern_phys += I386_BIG_PAGE_SIZE;
		pde++;
	}
	return pde;	/* free pde */
}

/* Enable paging, and the PSE, WP and (if available) PGE features. */
void vm_enable_paging(void)
{
	u32_t cr0, cr4;
	int pgeok;

	pgeok = _cpufeature(_CPUF_I386_PGE);

#ifdef PAE
	if(_cpufeature(_CPUF_I386_PAE) == 0)
		panic("kernel built with PAE support, CPU seems to lack PAE support?\n");
#endif

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* The boot loader should have put us in protected mode. */
	assert(cr0 & I386_CR0_PE);

	/* First clear PG and PGE flag, as PGE must be enabled after PG. */
	write_cr0(cr0 & ~I386_CR0_PG);
	write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* Our page table contains 4MB entries. */
	cr4 |= I386_CR4_PSE;

	write_cr4(cr4);

	/* First enable paging, then enable global page flag. */
	cr0 |= I386_CR0_PG;
	write_cr0(cr0);
	cr0 |= I386_CR0_WP;
	write_cr0(cr0);

	/* May we enable the global-page feature? */
	if(pgeok)
		cr4 |= I386_CR4_PGE;

	write_cr4(cr4);
}
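
/*
 * Illustrative sketch (not part of the original file): the rough order in
 * which early boot code is expected to drive the routines in this file
 * before the VM server takes over page table management. This is a hedged
 * outline, not a copy of the actual caller; the function name and the
 * freepde_start field used here are assumptions of this example.
 */
#if 0
static void example_boot_paging_sequence(kinfo_t *kinfo)
{
	pg_clear();				/* start from an empty page directory */
	pg_identity(kinfo);			/* identity-map the whole 4GB space */
	kinfo->freepde_start = pg_mapkernel();	/* map the kernel at its high virtual address */
	pg_load();				/* point CR3 at the boot page directory */
	vm_enable_paging();			/* turn on PG, PSE, WP and, if possible, PGE */
}
#endif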

/* Load the boot page directory into CR3 and return its physical address. */
phys_bytes pg_load(void)
{
	phys_bytes phpagedir = vir2phys(pagedir);
	write_cr3(phpagedir);
	return phpagedir;
}

/* Zero the boot page directory. */
void pg_clear(void)
{
	memset(pagedir, 0, sizeof(pagedir));
}

/* Round an address down to a page boundary. */
phys_bytes pg_rounddown(phys_bytes b)
{
	phys_bytes o;
	if(!(o = b % I386_PAGE_SIZE))
		return b;
	return b - o;
}

/* Map the virtual range [vaddr, vaddr_end) into the boot page directory.
 * If phys is PG_ALLOCATEME, fresh physical pages are allocated for the
 * range; otherwise the range is mapped to the given physical address.
 */
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
	kinfo_t *cbi)
{
	static int mapped_pde = -1;
	static u32_t *pt = NULL;
	int pde, pte;

	assert(kernel_may_alloc);

	if(phys == PG_ALLOCATEME) {
		assert(!(vaddr % I386_PAGE_SIZE));
	} else {
		assert((vaddr % I386_PAGE_SIZE) == (phys % I386_PAGE_SIZE));
		vaddr = pg_rounddown(vaddr);
		phys = pg_rounddown(phys);
	}
	assert(vaddr < kern_vir_start);

	while(vaddr < vaddr_end) {
		phys_bytes source = phys;
		assert(!(vaddr % I386_PAGE_SIZE));
		if(phys == PG_ALLOCATEME) {
			source = pg_alloc_page(cbi);
		} else {
			assert(!(phys % I386_PAGE_SIZE));
		}
		assert(!(source % I386_PAGE_SIZE));
		pde = I386_VM_PDE(vaddr);
		pte = I386_VM_PTE(vaddr);
		if(mapped_pde < pde) {
			/* Start a new page table for this directory entry. */
			phys_bytes ph;
			pt = alloc_pagetable(&ph);
			pagedir[pde] = (ph & I386_VM_ADDR_MASK)
				| I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
			mapped_pde = pde;
		}
		assert(pt);
		pt[pte] = (source & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
		vaddr += I386_PAGE_SIZE;
		if(phys != PG_ALLOCATEME)
			phys += I386_PAGE_SIZE;
	}
}
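
/*
 * Illustrative sketch (not part of the original file): two typical ways
 * pg_map() above can be used. The function name and all literal addresses
 * are made-up example values.
 */
#if 0
static void example_pg_map(kinfo_t *cbi)
{
	/* Map a known physical range (here: one page) at an equal virtual
	 * address, below the kernel's virtual base.
	 */
	pg_map(0x000b8000, 0x000b8000, 0x000b9000, cbi);

	/* Let pg_map() allocate fresh physical pages to back a virtual
	 * range; PG_ALLOCATEME requires a page-aligned vaddr.
	 */
	pg_map(PG_ALLOCATEME, 0x01000000, 0x01004000, cbi);
}
#endif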

/* Report the boot page directory's physical and virtual addresses. */
void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
	*pagedir_ph = vir2phys(pagedir);
	*pagedir_v = pagedir;
}