/*
 * To do:
 *	find a purpose for this...
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"

enum {
	AsmNONE = 0,
	AsmMEMORY = 1,
	AsmRESERVED = 2,
	AsmACPIRECLAIM = 3,
	AsmACPINVS = 4,

	AsmDEV = 5,
};

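/*
 * The map starts as a single AsmNONE entry covering the whole
 * physical address space; ranges are carved out of it with
 * asmalloc and returned (and coalesced) with asmfree.
 */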
static Lock asmlock;
static Asm asmarray[64] = {
	{ 0, ~0, AsmNONE, nil, },
};
static int asmindex = 1;
Asm* asmlist = &asmarray[0];
static Asm* asmfreelist;
static char* asmtypes[] = {
	[AsmNONE] "none",
	[AsmMEMORY] "mem",
	[AsmRESERVED] "res",
	[AsmACPIRECLAIM] "acpirecl",
	[AsmACPINVS] "acpinvs",
	[AsmDEV] "dev",
};

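/*
 * Print the current map, for debugging.
 */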
void
asmdump(void)
{
	Asm* asm;

	print("asm: index %d:\n", asmindex);
	for(asm = asmlist; asm != nil; asm = asm->next){
		print(" %#P %#P %d (%P)\n",
			asm->addr, asm->addr+asm->size,
			asm->type, asm->size);
	}
}

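/*
 * Allocate and initialise a map entry, taking it from the free
 * list or, failing that, from the static array; returns nil when
 * asmarray is exhausted.
 */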
static Asm*
asmnew(uintmem addr, uintmem size, int type)
{
	Asm *asm;

	if(asmfreelist != nil){
		asm = asmfreelist;
		asmfreelist = asm->next;
		asm->next = nil;
	}
	else{
		if(asmindex >= nelem(asmarray))
			return nil;
		asm = &asmarray[asmindex++];
	}
	asm->addr = addr;
	asm->size = size;
	asm->type = type;

	return asm;
}

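/*
 * Give the range [addr, addr+size) back to the map with the given
 * type, merging it with adjacent entries of the same type where
 * possible.  Fails if the range overlaps an existing entry.
 */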
int
asmfree(uintmem addr, uintmem size, int type)
{
	Asm *np, *pp, **ppp;

	DBG("asmfree: %#P@%#P, type %d\n", size, addr, type);
	if(size == 0)
		return 0;

	lock(&asmlock);

	/*
	 * Find either a map entry with an address greater
	 * than that being returned, or the end of the map.
	 */
	pp = nil;
	ppp = &asmlist;
	for(np = *ppp; np != nil && np->addr <= addr; np = np->next){
		pp = np;
		ppp = &np->next;
	}

	if((pp != nil && pp->addr+pp->size > addr)
	|| (np != nil && addr+size > np->addr)){
		unlock(&asmlock);
		DBG("asmfree: overlap %#P@%#P, type %d\n", size, addr, type);
		return -1;
	}

	if(pp != nil && pp->type == type && pp->addr+pp->size == addr){
		pp->size += size;
		if(np != nil && np->type == type && addr+size == np->addr){
			pp->size += np->size;
			pp->next = np->next;

			np->next = asmfreelist;
			asmfreelist = np;
		}

		unlock(&asmlock);
		return 0;
	}

	if(np != nil && np->type == type && addr+size == np->addr){
		np->addr -= size;
		np->size += size;

		unlock(&asmlock);
		return 0;
	}

	if((pp = asmnew(addr, size, type)) == nil){
		unlock(&asmlock);
		DBG("asmfree: losing %#P@%#P, type %d\n", size, addr, type);
		return -1;
	}
	*ppp = pp;
	pp->next = np;

	unlock(&asmlock);

	return 0;
}

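/*
 * Allocate size bytes of the given type from the map, optionally
 * at a fixed address (addr != 0) and/or with the given alignment.
 * Returns the address of the allocation, or 0 on failure; any gap
 * left in front by alignment is returned to the map.
 */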
uintmem
asmalloc(uintmem addr, uintmem size, int type, int align)
{
	uintmem a, o;
	Asm *asm, *pp;

	DBG("asmalloc: %#P@%#P, type %d\n", size, addr, type);
	lock(&asmlock);
	for(pp = nil, asm = asmlist; asm != nil; pp = asm, asm = asm->next){
		if(asm->type != type)
			continue;
		a = asm->addr;

		if(addr != 0){
			/*
			 * A specific address range has been given:
			 * if the current map entry starts beyond the
			 * given address then the address is not in the map;
			 * if the current map entry does not overlap
			 * the beginning of the requested range then
			 * continue on to the next map entry;
			 * if the current map entry does not entirely
			 * contain the requested range then the range
			 * is not in the map.
			 * The comparisons are strange to prevent
			 * overflow.
			 */
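			/*
			 * E.g. "addr+size > asm->addr+asm->size" is tested
			 * as "addr - a > asm->size - size" (a == asm->addr
			 * here), so neither sum can wrap; the checks above
			 * ensure addr - a cannot underflow.
			 */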
			if(a > addr)
				break;
			if(asm->size < addr - a)
				continue;
			if(addr - a > asm->size - size)
				break;
			a = addr;
		}

		if(align > 0)
			a = ((a+align-1)/align)*align;
		if(asm->addr+asm->size-a < size)
			continue;

		o = asm->addr;
		asm->addr = a+size;
		asm->size -= a-o+size;
		if(asm->size == 0){
			if(pp != nil)
				pp->next = asm->next;
			asm->next = asmfreelist;
			asmfreelist = asm;
		}

		unlock(&asmlock);
		if(o != a)
			asmfree(o, a-o, type);
		return a;
	}
	unlock(&asmlock);

	return 0;
}

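/*
 * Insert a range discovered at boot: carve it out of the AsmNONE
 * background entry, then re-enter it with its own type; if that
 * fails, put it back as AsmNONE rather than lose it.
 */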
static void
asminsert(uintmem addr, uintmem size, int type)
{
	if(type == AsmNONE || asmalloc(addr, size, AsmNONE, 0) == 0)
		return;
	if(asmfree(addr, size, type) == 0)
		return;
	asmfree(addr, size, 0);
}

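/*
 * Record where the loaded kernel ends and remove everything below
 * it from the map so that it is never handed out.
 */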
void
asminit(void)
{
	sys->pmstart = ROUNDUP(PADDR(end), PGSZ);
	sys->pmend = sys->pmstart;
	asmalloc(0, sys->pmstart, AsmNONE, 0);
}

/*
 * Notes:
 * asmmapinit and asmmodinit are called from the multiboot code;
 * subject to change; the numerology here is probably suspect.
 * Multiboot defines the alignment of modules as 4096.
 */
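/*
 * Add one entry of the boot memory map.  AsmMEMORY ranges below
 * 1MiB or entirely below sys->pmstart are ignored, and a range
 * straddling pmstart is clipped; sys->pmoccupied and sys->pmend
 * record the total usable memory seen and its upper bound.
 */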
void
asmmapinit(uintmem addr, uintmem size, int type)
{
	DBG("asmmapinit %#P %#P %s\n", addr, size, asmtypes[type]);

	switch(type){
	default:
		asminsert(addr, size, type);
		break;
	case AsmMEMORY:
		/*
		 * Adjust things for the peculiarities of this
		 * architecture.
		 * Sys->pmend is the largest physical memory address found;
		 * there may be gaps between it and sys->pmstart.
		 * The range, and how much of it is occupied, may need
		 * to be known later when setting up allocators.
		 */
		if(addr < 1*MiB || addr+size < sys->pmstart)
			break;
		if(addr < sys->pmstart){
			size -= sys->pmstart - addr;
			addr = sys->pmstart;
		}
		asminsert(addr, size, type);
		sys->pmoccupied += size;
		if(addr+size > sys->pmend)
			sys->pmend = addr+size;
		break;
	}
}

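/*
 * Reserve the memory occupied by a multiboot module so it is not
 * handed out later: bump sys->pmstart past the module (rounded up
 * to the 4096-byte multiboot alignment) and remove the space from
 * the map.
 */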
void
asmmodinit(u32int start, u32int end, char* s)
{
	DBG("asmmodinit: %#ux -> %#ux: <%s> %#ux\n",
		start, end, s, ROUNDUP(end, 4096));

	if(start < sys->pmstart)
		return;
	end = ROUNDUP(end, 4096);
	if(end > sys->pmstart){
		asmalloc(sys->pmstart, end-sys->pmstart, AsmNONE, 0);
		sys->pmstart = end;
	}
}

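/*
 * Allocator passed to mmuwalk by asmmeminit: hand out page-table
 * pages from the already-mapped region between sys->vmunused and
 * sys->vmunmapped.
 */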
static PTE
asmwalkalloc(usize size)
{
	uintmem pa;

	assert(size == PTSZ && sys->vmunused+size <= sys->vmunmapped);

	if((pa = mmuphysaddr(sys->vmunused)) != ~0)
		sys->vmunused += size;

	return pa;
}

#include "amd64.h"

static int npg[4];	/* count of pages mapped at each size */

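/*
 * Finish the kernel memory map: extend the kernel's own mapping
 * from sys->vmunmapped to sys->vmend with 2MiB pages, then map
 * every AsmMEMORY range at KSEG2, using the largest page size
 * that fits each chunk.
 */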
void
asmmeminit(void)
{
	Asm* asm;
	PTE *pte;
	int i, l;
	uintptr n, va;
	uintmem hi, lo, mem, nextmem, pa;

	/*
	 * to do here (done?):
	 *	map between vmunmapped and vmend to kzero;
	 *	(should the sys->vm* things be physical after all?)
	 *	adjust sys->vm things and asmalloc to compensate;
	 *	run through asmlist and map to kseg2.
	 *	do we need a map, like vmap, for best use of mapping kmem?
	 *	- in fact, a rewritten pdmap could do the job, no?
	 *	have to assume up to vmend is contiguous.
	 *	can't mmuphysaddr(sys->vmunmapped) because...
	 *
	 * Assume already 2MiB aligned; currently this is done in mmuinit.
	 */
	assert(m->pgszlg2[1] == 21);
	assert(!((sys->vmunmapped|sys->vmend) & m->pgszmask[1]));

	if((pa = mmuphysaddr(sys->vmunused)) == ~0)
		panic("asmmeminit 1");
	pa += sys->vmunmapped - sys->vmunused;
	mem = asmalloc(pa, sys->vmend - sys->vmunmapped, AsmMEMORY, 0);
	if(mem != pa)
		panic("asmmeminit 2");
	DBG("asmmeminit: mem %#P\n", mem);

	while(sys->vmunmapped < sys->vmend){
		l = mmuwalk(sys->vmunmapped, 1, &pte, asmwalkalloc);
		DBG("asmmeminit: %#p l %d\n", sys->vmunmapped, l); USED(l);
		*pte = pa|PtePS|PteRW|PteP;
		sys->vmunmapped += 2*MiB;
		pa += 2*MiB;
	}

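	/*
	 * Map all usable physical memory at KSEG2 so the kernel can
	 * address it directly.
	 */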
	for(asm = asmlist; asm != nil; asm = asm->next){
		if(asm->type != AsmMEMORY)
			continue;
		va = KSEG2+asm->addr;
		DBG(" %#P %#P %s (%P) va %#p\n",
			asm->addr, asm->addr+asm->size,
			asmtypes[asm->type], asm->size, va);

		lo = asm->addr;
		hi = asm->addr+asm->size;
		/* Convert a range into pages */
		for(mem = lo; mem < hi; mem = nextmem){
			nextmem = (mem + PGLSZ(0)) & ~m->pgszmask[0];

			/* Try large pages first */
			for(i = m->npgsz - 1; i >= 0; i--){
				if((mem & m->pgszmask[i]) != 0)
					continue;
				if(mem + PGLSZ(i) > hi)
					continue;

				/*
				 * This page fits entirely within the range,
				 * mark it as usable.
				 */
				if((l = mmuwalk(va, i, &pte, asmwalkalloc)) < 0)
					panic("asmmeminit 3");

				*pte = mem|PteRW|PteP;
				if(l > 0)
					*pte |= PtePS;

				nextmem = mem + PGLSZ(i);
				va += PGLSZ(i);
				npg[i]++;
				break;
			}
		}

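		/*
		 * Remember the page-aligned span of this range;
		 * asmumeminit uses it for accounting, and KADDR(asm->base)
		 * gives its kernel virtual address.
		 */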
		lo = ROUNDUP(asm->addr, PGSZ);
		asm->base = lo;
		hi = ROUNDDN(hi, PGSZ);
		asm->limit = hi;
		asm->kbase = PTR2UINT(KADDR(asm->base));
	}

	n = sys->vmend - sys->vmstart;	/* close enough */
	if(n > 600ull*MiB)
		n = 600ull*MiB;
	ialloclimit(n/3);
}

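/*
 * Hand the AsmMEMORY ranges to the physical page allocator and
 * account for the pageable memory.
 */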
void
asmumeminit(void)
{
	Asm *asm;
	extern void physallocdump(void);

	for(asm = asmlist; asm != nil; asm = asm->next){
		if(asm->type != AsmMEMORY)
			continue;
		physinit(asm->addr, asm->size);
		sys->pmpaged += ROUNDDN(asm->limit, 2*MiB) - ROUNDUP(asm->base, 2*MiB);
	}
	physallocdump();
}