xref: /inferno-os/os/js/mmu.c (revision 7ef44d652ae9e5e1f5b3465d73684e4a54de73c0)
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"io.h"

typedef struct Ctx Ctx;
/*
 * software description of an MMU context
 */
struct Ctx
{
	Ctx	*prev;	/* less recently used */
	Ctx	*next;	/* more recently used */
	Proc	*proc;	/* process that owns this context */
	ushort	index;	/* which context this is */
};

ulong	*ioptes;	/* IO MMU's table (shared by all processors) */

/* offset of x into the three page table levels in a context */
#define AOFF(x) (((ulong)x)>>24)
#define BOFF(x)	((((ulong)x)>>18)&(64-1))
#define COFF(x)	((((ulong)x)>>12)&(64-1))
#define	ISPTAB(x) ((((ulong)x)&3) == PTPVALID)
#define	KPN(va) PPN(PADDR(va))
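
/*
 * The three-level walk splits a 32-bit virtual address 8/6/6/12:
 * 256 level 1 entries, 64 level 2, 64 level 3, 4KB pages.
 * For example:
 *
 *	AOFF(0x12345678) == 0x12	(bits 31-24, level 1 index)
 *	BOFF(0x12345678) == 0x0d	(bits 23-18, level 2 index)
 *	COFF(0x12345678) == 0x05	(bits 17-12, level 3 index)
 */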

#define	NIOPTE	(DMASEGSIZE/BY2PG)

/*
 * enter one mapping into the kernel page table (context 0),
 * allocating intermediate page table pages as needed.
 * Return the address of the entry.
 */
static ulong*
putkmmu(ulong virt, ulong phys, int level)
{
	ulong *a, *b, *c;

	a = &PPT(m->contexts[0])[AOFF(virt)];
	if(level > 1) {
		if(*a == 0){
			/* no level 2 table yet: allocate one, aligned to its size */
			b = (ulong*)xspanalloc(64*sizeof(ulong),
					       64*sizeof(ulong), 0);
			*a = KPN(b) | PTPVALID;
		} else {
			if(!ISPTAB(*a))
				panic("putkmmu virt=%lux *a=%lux", virt, *a);
			b = PPT(*a);
		}
		b = &b[BOFF(virt)];
		if(level > 2) {
			if(*b == 0){
				/* no level 3 table yet: allocate one */
				c = (ulong*)xspanalloc(64*sizeof(ulong),
						       64*sizeof(ulong), 0);
				*b = KPN(c) | PTPVALID;
			} else {
				if(!ISPTAB(*b))
					panic("putkmmu virt=%lux *b=%lux",
					      virt, *b);
				c = PPT(*b);
			}
			c = &c[COFF(virt)];
			*c = phys;
			return c;
		} else {
			*b = phys;
			return b;
		}
	} else {
		*a = phys;
		return a;
	}
}
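
/*
 * A level 1 PTE maps 64*64 pages (16MB), a level 2 PTE maps 64
 * pages (256KB), and a level 3 PTE maps a single 4KB page; so a
 * single kernel page at physical address pa would be entered with
 * a call like the ones in mmuinit below:
 *
 *	putkmmu(KZERO|va, PPN(pa)|PTEKERNEL|PTEWRITE|PTEVALID|PTECACHE, 3);
 */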

void
mmuinit(void)
{
	int i, n;
	ulong *a;

	m->contexts = (ulong*)xspanalloc(conf.ncontext*sizeof(ulong),
					 conf.ncontext*sizeof(ulong),
					 0);

	/*
	 * context 0 will have the prototype level 1 entries
	 */
	a = (ulong*)xspanalloc(256*sizeof(ulong), 256*sizeof(ulong), 0);

	m->contexts[0] = KPN(a) | PTPVALID;

	/*
	 * map all memory (at most 128MB) to KZERO
	 */
	n = 128*MB/BY2PG;

	/* pages to first segment boundary */
	for(i=0; i<(256*1024/BY2PG); i++)
		putkmmu(KZERO|(i*BY2PG),
			PPN(i*BY2PG)|PTEKERNEL|PTEWRITE|PTEVALID|PTECACHE, 3);

	/* segments to first 16MB boundary */
	for(; i<(16*MB)/BY2PG; i += 64)
		putkmmu(KZERO|(i*BY2PG),
			PPN(i*BY2PG)|PTEKERNEL|PTEWRITE|PTEVALID|PTECACHE, 2);

	/* 16MB regions to end */
	for(; i<n; i += 64*64)
		putkmmu(KZERO|(i*BY2PG),
			PPN(i*BY2PG)|PTEKERNEL|PTEWRITE|PTEVALID|PTECACHE, 1);

	/*
	 * allocate page table pages for IO mapping
	 */
	n = IOSEGSIZE/BY2PG;
	for(i=0; i<n; i++)
		putkmmu(IOSEGBASE+(i*BY2PG), 0, 3);

	/*
	 * load kernel context; MMU table pointers are
	 * physical addresses shifted right 4 bits
	 */
	putrmmu(CTPR, PADDR(m->contexts)>>4);
	putrmmu(CXR, 0);
	flushtlb();

	/* IO MMU translation table, aligned to its size like the page tables */
	ioptes = (ulong*)xspanalloc(NIOPTE*sizeof(ulong), DMASEGSIZE/1024, 0);
	putphys(IBAR, PADDR(ioptes)>>4);
	putphys(IOCR, (DMARANGE<<2)|1);	/* IO MMU enable */
}
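
/*
 * With 4KB pages the three loops above cover 128MB using only
 * 134 PTEs: 64 level 3 entries for the first 256KB, 63 level 2
 * entries up to the 16MB boundary, then 7 level 1 entries.
 */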

void
flushicache(void)
{
	int i;
	ulong addr = 0;

	/* 512 lines of 32 bytes each: 16KB, flushed line by line */
	for(i=0; i<512; i++) {
		flushiline(addr);
		addr += 1<<5;
	}
}

void
flushdcache(void)
{
	int i;
	ulong addr = 0;

	/* same geometry as the instruction cache: 512 32-byte lines */
	for(i=0; i<512; i++) {
		flushdline(addr);
		addr += 1<<5;
	}
}

int
segflush(void *p, ulong l)
{
	USED(p,l);
	flushicache();	/* the whole icache; the range is ignored */
	return 0;
}

void
cacheinit(void)
{
	/* flush both caches before enabling them */
	flushdcache();
	flushicache();
	setpcr(getpcr()|ENABCACHE);
}

typedef struct Mregion Mregion;
struct Mregion
{
	ulong	addr;
	long	size;
};

struct
{
	Mregion	io;
	Mregion	dma;
	Lock;
}kmapalloc = {
	{IOSEGBASE, IOSEGSIZE},
	{DMASEGBASE, DMASEGSIZE},
};
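
/*
 * the unnamed Lock is a Plan 9 C anonymous substructure,
 * so lock(&kmapalloc) below locks the allocator as a whole
 */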

void
kmapinit(void)
{
}

/*
 * map one physical page at the next free IO segment address.
 * mappings are permanent: nothing in this allocator frees them.
 */
KMap*
kmappa(ulong pa, ulong flag)
{
	ulong k;

	lock(&kmapalloc);
	k = kmapalloc.io.addr;
	kmapalloc.io.addr += BY2PG;
	if((kmapalloc.io.size -= BY2PG) < 0)
		panic("kmappa");
	putkmmu(k, PPN(pa)|PTEKERNEL|PTEWRITE|PTEVALID|flag, 3);
	flushtlbpage(k);
	unlock(&kmapalloc);
	return (KMap*)k;
}
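
/*
 * kmappa does not set PTECACHE itself, so a driver passes 0 in
 * flag for an uncached register mapping, or PTECACHE for a
 * cached one. An illustrative use ("Devregs" and the physical
 * address are made up):
 *
 *	Devregs *r = (Devregs*)kmappa(0x78000000, 0);
 */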

ulong
kmapdma(ulong pa, ulong n)
{
	ulong va0, va;
	int i, j;

	lock(&kmapalloc);
	i = (n+(BY2PG-1))/BY2PG;	/* round up to whole pages */
	va0 = kmapalloc.dma.addr;
	kmapalloc.dma.addr += i*BY2PG;
	if((kmapalloc.dma.size -= i*BY2PG) <= 0)
		panic("kmapdma");
	va = va0;
	for(j=0; j<i; j++) {
		putkmmu(va, PPN(pa)|PTEKERNEL|PTEVALID|PTEWRITE, 3);
		flushtlbpage(va);
		ioptes[(va>>PGSHIFT)&(NIOPTE-1)] = PPN(pa)|IOPTEVALID|IOPTEWRITE;
		va += BY2PG;
		pa += BY2PG;
	}
	unlock(&kmapalloc);
	return va0;
}
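
/*
 * kmapdma enters each page both in the kernel page table and, at
 * the same virtual address, in the IO MMU's table, so the address
 * it returns evidently serves both the CPU and DVMA devices. A
 * sketch of a typical use, for a hypothetical buffer buf of len
 * bytes:
 *
 *	buf = xspanalloc(len, BY2PG, 0);
 *	busaddr = kmapdma(PADDR(buf), len);
 */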

/*
 * map an SBus slot (used for the frame buffer) at FBSEGBASE
 */
ulong
kmapsbus(int slot)
{
	int i, n;

	lock(&kmapalloc);
	n = FBSEGSIZE/BY2PG;
	/* 16MB level 1 mappings, uncached (no PTECACHE) */
	for(i=0; i<n; i += 64*64)
		putkmmu(FBSEGBASE+(i*BY2PG),
			PPN(SBUS(slot)+(i*BY2PG))|PTEKERNEL|PTEWRITE|PTEVALID, 1);
	unlock(&kmapalloc);
	return FBSEGBASE;
}
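
/*
 * A frame buffer driver would map its slot once and then draw
 * through the returned address, e.g. (illustrative; slot and
 * fbsize are made up):
 *
 *	ulong fb = kmapsbus(slot);
 *	memset((void*)fb, 0, fbsize);
 */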
253