#include <minix/cpufeature.h>

#include <assert.h>
#include "kernel/kernel.h"
#include "arch_proto.h"

#include <string.h>

/* These are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Resolve the linker symbols to absolute values we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* Page directory we can use to map things. */
static u32_t pagedir[1024] __aligned(4096);

void print_memmap(kinfo_t *cbi)
{
	int m;
	assert(cbi->mmap_size < MAXMEMMAP);
	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes addr = cbi->memmap[m].mm_base_addr,
			endit = cbi->memmap[m].mm_base_addr +
				cbi->memmap[m].mm_length;
		printf("%08lx-%08lx ", addr, endit);
	}
	printf("\nsize %08lx\n", cbi->mmap_size);
}

void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
	int m;
	phys_bytes o;

	/* Page-align the cut range: start down, end up. */
	if((o = start % I386_PAGE_SIZE))
		start -= o;
	if((o = end % I386_PAGE_SIZE))
		end += I386_PAGE_SIZE - o;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes substart = start, subend = end;
		phys_bytes memaddr = cbi->memmap[m].mm_base_addr,
			memend = cbi->memmap[m].mm_base_addr +
				cbi->memmap[m].mm_length;

		/* adjust cut range to be a subset of the free memory */
		if(substart < memaddr) substart = memaddr;
		if(subend > memend) subend = memend;
		if(substart >= subend) continue;

		/* if there is any overlap, forget this one and add
		 * 1-2 subranges back
		 */
		cbi->memmap[m].mm_base_addr = cbi->memmap[m].mm_length = 0;
		if(substart > memaddr)
			add_memmap(cbi, memaddr, substart-memaddr);
		if(subend < memend)
			add_memmap(cbi, subend, memend-subend);
	}
}

phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
{
	/* Allocate the lowest physical page we have. */
	int m;
#define EMPTY 0xffffffff
	phys_bytes lowest = EMPTY;
	assert(len > 0);
	len = roundup(len, I386_PAGE_SIZE);

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		if(cbi->memmap[m].mm_length < len) continue;
		if(cbi->memmap[m].mm_base_addr < lowest)
			lowest = cbi->memmap[m].mm_base_addr;
	}
	assert(lowest != EMPTY);
	/* cut_memmap() takes an absolute [start, end) range, not a length. */
	cut_memmap(cbi, lowest, lowest + len);
	cbi->kernel_allocated_bytes_dynamic += len;
	return lowest;
}

void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
	int m;
#define LIMIT 0xFFFFF000
	/* Truncate available memory at 4GB as the rest of minix
	 * currently can't deal with anything bigger.
	 */
	if(addr > LIMIT) return;
	if(addr + len > LIMIT) {
		len -= (addr + len - LIMIT);
	}
	assert(cbi->mmap_size < MAXMEMMAP);
	if(len == 0) return;
	addr = roundup(addr, I386_PAGE_SIZE);
	len = rounddown(len, I386_PAGE_SIZE);

	assert(kernel_may_alloc);

	for(m = 0; m < MAXMEMMAP; m++) {
		phys_bytes highmark;
		if(cbi->memmap[m].mm_length) continue;
		cbi->memmap[m].mm_base_addr = addr;
		cbi->memmap[m].mm_length = len;
		cbi->memmap[m].mm_type = MULTIBOOT_MEMORY_AVAILABLE;
		if(m >= cbi->mmap_size)
			cbi->mmap_size = m+1;
		highmark = addr + len;
		if(highmark > cbi->mem_high_phys) {
			cbi->mem_high_phys = highmark;
		}

		return;
	}

	panic("no available memmap slot");
}
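/*
 * Illustrative, hypothetical usage of the memmap primitives above, kept
 * out of the build with #if 0. Reserving a used region is a cut_memmap()
 * call: any overlapping map entry is cleared and the surviving subranges
 * are re-added through add_memmap(). All addresses here are made up.
 */
#if 0
static void example_reserve_region(kinfo_t *cbi)
{
	/* Given one free entry [0x00100000, 0x08000000), cutting out the
	 * 4MB at 0x00200000 clears that entry and re-adds the subranges
	 * [0x00100000, 0x00200000) and [0x00600000, 0x08000000).
	 */
	cut_memmap(cbi, 0x00200000, 0x00600000);
}
#endif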
u32_t *alloc_pagetable(phys_bytes *ph)
{
	u32_t *ret;
#define PG_PAGETABLES 6
	static u32_t pagetables[PG_PAGETABLES][1024] __aligned(4096);
	static int pt_inuse = 0;
	if(pt_inuse >= PG_PAGETABLES) panic("no more pagetables");
	assert(sizeof(pagetables[pt_inuse]) == I386_PAGE_SIZE);
	ret = pagetables[pt_inuse++];
	*ph = vir2phys(ret);
	return ret;
}

#define PAGE_KB (I386_PAGE_SIZE / 1024)

phys_bytes pg_alloc_page(kinfo_t *cbi)
{
	int m;
	multiboot_memory_map_t *mmap;

	assert(kernel_may_alloc);

	/* Take the last page of the last non-empty map entry. */
	for(m = cbi->mmap_size-1; m >= 0; m--) {
		mmap = &cbi->memmap[m];
		if(!mmap->mm_length) continue;
		assert(mmap->mm_length > 0);
		assert(!(mmap->mm_length % I386_PAGE_SIZE));
		assert(!(mmap->mm_base_addr % I386_PAGE_SIZE));

		mmap->mm_length -= I386_PAGE_SIZE;

		cbi->kernel_allocated_bytes_dynamic += I386_PAGE_SIZE;

		return mmap->mm_base_addr + mmap->mm_length;
	}

	panic("can't find free memory");
}

void pg_identity(kinfo_t *cbi)
{
	uint32_t i;
	phys_bytes phys;

	/* We map memory that does not correspond to physical memory
	 * as non-cacheable. Make sure we know what it is.
	 */
	assert(cbi->mem_high_phys);

	/* Set up an identity mapping page directory */
	for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
		u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE
			| I386_VM_USER
			| I386_VM_WRITE;
		phys = i * I386_BIG_PAGE_SIZE;
		if((cbi->mem_high_phys & I386_VM_ADDR_MASK_4MB)
			<= (phys & I386_VM_ADDR_MASK_4MB)) {
			flags |= I386_VM_PWT | I386_VM_PCD;
		}
		pagedir[i] = phys | flags;
	}
}

int pg_mapkernel(void)
{
	int pde;
	u32_t mapped = 0, kern_phys = kern_phys_start;

	assert(!(kern_vir_start % I386_BIG_PAGE_SIZE));
	assert(!(kern_phys % I386_BIG_PAGE_SIZE));
	pde = kern_vir_start / I386_BIG_PAGE_SIZE; /* start pde */
	while(mapped < kern_kernlen) {
		pagedir[pde] = kern_phys | I386_VM_PRESENT |
			I386_VM_BIGPAGE | I386_VM_WRITE;
		mapped += I386_BIG_PAGE_SIZE;
		kern_phys += I386_BIG_PAGE_SIZE;
		pde++;
	}
	return pde; /* first free pde */
}

void vm_enable_paging(void)
{
	u32_t cr0, cr4;
	int pgeok;

	pgeok = _cpufeature(_CPUF_I386_PGE);

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* The boot loader should have put us in protected mode. */
	assert(cr0 & I386_CR0_PE);

	/* First clear the PG and PGE flags, as PGE must be enabled after PG. */
	write_cr0(cr0 & ~I386_CR0_PG);
	write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* Our page directory contains 4MB (big page) entries, so enable PSE. */
	cr4 |= I386_CR4_PSE;

	write_cr4(cr4);

	/* First enable paging, then enable global page flag. */
	cr0 |= I386_CR0_PG;
	write_cr0(cr0);
	cr0 |= I386_CR0_WP;
	write_cr0(cr0);

	/* Enable global pages if the CPU supports them. */
	if(pgeok)
		cr4 |= I386_CR4_PGE;

	write_cr4(cr4);
}
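/*
 * Illustrative sketch, kept out of the build with #if 0, of the boot-time
 * order these routines are designed for: clear the page directory, build
 * the identity mapping, map the kernel at its virtual base, load CR3, and
 * only then switch paging on. The function name is hypothetical.
 */
#if 0
static void example_paging_bringup(kinfo_t *cbi)
{
	int free_pde;

	pg_clear();			/* start from an empty page directory */
	pg_identity(cbi);		/* 1:1 map; uncached above mem_high_phys */
	free_pde = pg_mapkernel();	/* map the kernel image; first free pde */
	pg_load();			/* point CR3 at the page directory */
	vm_enable_paging();		/* PSE, then PG and WP, then PGE */
	(void)free_pde;
}
#endif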
phys_bytes pg_load(void)
{
	phys_bytes phpagedir = vir2phys(pagedir);
	write_cr3(phpagedir);
	return phpagedir;
}

void pg_clear(void)
{
	memset(pagedir, 0, sizeof(pagedir));
}

phys_bytes pg_rounddown(phys_bytes b)
{
	phys_bytes o;
	if(!(o = b % I386_PAGE_SIZE))
		return b;
	return b - o;
}

void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
	kinfo_t *cbi)
{
	static int mapped_pde = -1;
	static u32_t *pt = NULL;
	int pde, pte;

	assert(kernel_may_alloc);

	if(phys == PG_ALLOCATEME) {
		assert(!(vaddr % I386_PAGE_SIZE));
	} else {
		assert((vaddr % I386_PAGE_SIZE) == (phys % I386_PAGE_SIZE));
		vaddr = pg_rounddown(vaddr);
		phys = pg_rounddown(phys);
	}
	assert(vaddr < kern_vir_start);

	while(vaddr < vaddr_end) {
		phys_bytes source = phys;
		assert(!(vaddr % I386_PAGE_SIZE));
		if(phys == PG_ALLOCATEME) {
			source = pg_alloc_page(cbi);
		} else {
			assert(!(phys % I386_PAGE_SIZE));
		}
		assert(!(source % I386_PAGE_SIZE));
		pde = I386_VM_PDE(vaddr);
		pte = I386_VM_PTE(vaddr);
		if(mapped_pde < pde) {
			phys_bytes ph;
			pt = alloc_pagetable(&ph);
			pagedir[pde] = (ph & I386_VM_ADDR_MASK)
				| I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
			mapped_pde = pde;
		}
		assert(pt);
		pt[pte] = (source & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
		vaddr += I386_PAGE_SIZE;
		if(phys != PG_ALLOCATEME)
			phys += I386_PAGE_SIZE;
	}
}

void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
	*pagedir_ph = vir2phys(pagedir);
	*pagedir_v = pagedir;
}
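/*
 * Illustrative, hypothetical uses of pg_map(), kept out of the build with
 * #if 0. Passing a real physical address maps an existing region; passing
 * PG_ALLOCATEME backs the virtual range with pages from pg_alloc_page().
 * The virtual addresses are made up and must stay below kern_vir_start.
 */
#if 0
static void example_pg_map(kinfo_t *cbi)
{
	/* Map the VGA text buffer 1:1 (one page at 0xb8000). */
	pg_map(0x000b8000, 0x000b8000, 0x000b8000 + I386_PAGE_SIZE, cbi);

	/* Back a 64kB virtual window with freshly allocated pages. */
	pg_map(PG_ALLOCATEME, 0x00800000, 0x00800000 + 64*1024, cbi);
}
#endif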