/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _PMAP_PRIVATE

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.3 2011/02/17 13:55:44 matt Exp $");

#include <sys/param.h>
#include <sys/kcore.h>
#include <sys/buf.h>

#include <uvm/uvm_extern.h>

#include <machine/pmap.h>

/*
 * Initialize the kernel pmap.
 */
#ifdef MULTIPROCESSOR
#define	PMAP_SIZE	offsetof(struct pmap, pm_pai[MAXCPUS])
#else
#define	PMAP_SIZE	sizeof(struct pmap)
#endif

CTASSERT(sizeof(struct pmap_segtab) == NBPG);

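/*
 * Synchronize the caches for a range of a process's address space whose
 * instructions have been modified, e.g. after the debugger plants a
 * breakpoint in user text:
 *
 *	pmap_procwr(p, va, sizeof(uint32_t));
 *
 * The instruction cache is not coherent with the data cache on these
 * cores, so the modified data must be written back to memory and any
 * stale instructions invalidated, one page at a time.
 */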
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap * const pmap = p->p_vmspace->vm_map.pmap;
	vsize_t off = va & PAGE_MASK;	/* offset into the first page */

	kpreempt_disable();
	for (const vaddr_t eva = va + len; va < eva; off = 0) {
		/* Stop at the end of the range or of the current page. */
		const vaddr_t segeva = min(eva, va - off + PAGE_SIZE);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL) {
			va = segeva;
			continue;
		}
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry) || !pte_exec_p(pt_entry)) {
			va = segeva;
			continue;
		}
		kpreempt_enable();
		dcache_wb(pte_to_paddr(pt_entry), segeva - va);
		icache_inv(pte_to_paddr(pt_entry), segeva - va);
		kpreempt_disable();
		va = segeva;
	}
	kpreempt_enable();
}

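/*
 * MD hook used by the common pmap when a page's contents must become
 * visible to instruction fetch: write the page's data cache lines back
 * to memory, then invalidate the corresponding icache lines.
 */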
void
pmap_md_page_syncicache(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	dcache_wb_page(pa);
	icache_inv_page(pa);
}

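/*
 * The Book E kernel runs with physical memory mapped 1:1 by fixed TLB
 * entries, so the "direct map" conversions below are identities: a
 * virtual address is direct mapped exactly when it falls outside the
 * [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS) window used for
 * demand-mapped kernel virtual memory.
 */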
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
	return (vaddr_t) pa;
}

bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
	return va < VM_MIN_KERNEL_ADDRESS || VM_MAX_KERNEL_ADDRESS <= va;
}

paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
	return (paddr_t) va;
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	avail[0..cnt-1] describes the segments of physical RAM that are
 *	available to UVM.
 */
void
pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
	const phys_ram_seg_t *avail, size_t cnt)
{
	/*
	 * Hand each segment of physical RAM to UVM; every page of each
	 * segment is free (the avail range equals the segment range).
	 */
	for (size_t i = 0; i < cnt; i++) {
		printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
		uvm_page_physload(
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
	}

	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */

	/*
	 * Compute the number of pages kmem_map will have.
	 */
	kmeminit_nkmempages();

	/*
	 * Figure out how many PTEs are necessary to map the kernel.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

	/*
	 * nsegtabs is a count of page-table pages, one per NBSEG bytes of
	 * kernel virtual space.
	 */
	vsize_t nsegtabs = pmap_round_seg(VM_PHYS_SIZE
	    + (ubc_nwins << ubc_winshift)
	    + bufsz
	    + 16 * NCARGS
	    + pager_map_size
	    + maxproc * USPACE
#ifdef SYSVSHM
	    + NBPG * shminfo.shmall
#endif
	    + NBPG * nkmempages) / NBSEG;

	/*
	 * Initialize `FYI' variables.	Note we're relying on
	 * the fact that BSEARCH sorts the vm_physmem[] array
	 * for us.  Must do this before uvm_pageboot_alloc()
	 * can be called.
	 */
	pmap_limits.avail_start = vm_physmem[0].start << PGSHIFT;
	pmap_limits.avail_end = vm_physmem[vm_nphysseg - 1].end << PGSHIFT;
	const vsize_t max_nsegtabs =
	    (pmap_round_seg(VM_MAX_KERNEL_ADDRESS)
		- pmap_trunc_seg(VM_MIN_KERNEL_ADDRESS)) / NBSEG;
	if (nsegtabs >= max_nsegtabs) {
		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
		nsegtabs = max_nsegtabs;
	} else {
		pmap_limits.virtual_end = VM_MIN_KERNEL_ADDRESS
		    + nsegtabs * NBSEG;
	}

	pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);

	/*
	 * Now actually allocate the kernel PTE array (must be done
	 * after virtual_end is initialized).
	 */
	vaddr_t segtabs =
	    uvm_pageboot_alloc(NBPG * nsegtabs + sizeof(struct pmap_segtab));

	/*
	 * Initialize the kernel's two-level page table.  This wastes only
	 * an extra page for the segment table and lets user and kernel
	 * addresses share a common lookup path.
	 */
	struct pmap_segtab * const stp = (void *)segtabs;
	segtabs += round_page(sizeof(struct pmap_segtab));
	pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG) {
		*ptp++ = (void *)segtabs;
	}
	pmap_kernel()->pm_segtab = stp;
	curcpu()->ci_pmap_kern_segtab = stp;
	printf(" kern_segtab=%p", stp);

#if 0
	nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
	segtabs = uvm_pageboot_alloc(NBPG * nsegtabs);
	ptp = stp->seg_tab;
	pt_entry_t pt_entry = PTE_M|PTE_xX|PTE_xR;
	pt_entry_t *ptep = (void *)segtabs;
	printf("%s: allocated %lu page table pages for mapping %u pages\n",
	    __func__, nsegtabs, physmem);
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG, ptp++) {
		*ptp = ptep;
		for (u_int j = 0; j < NPTEPG; j++, ptep++) {
			*ptep = pt_entry;
			pt_entry += NBPG;
		}
		printf(" [%u]=%p (%#x)", i, *ptp, **ptp);
		pt_entry |= PTE_xW;
		pt_entry &= ~PTE_xX;
	}

	/*
	 * Now make everything before the kernel inaccessible.
	 * (i is a byte address here, stepped a page at a time.)
	 */
	for (u_int i = 0; i < startkernel; i += NBPG) {
		stp->seg_tab[i >> SEGSHIFT][(i & SEGOFSET) >> PAGE_SHIFT] = 0;
	}
#endif

	/*
	 * Initialize the pools.
	 */
	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator, IPL_NONE);

	tlb_set_asid(0);
}

struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
	/*
	 * Any managed page works for us.
	 */
	return uvm_pagealloc(NULL, 0, NULL, flags);
}

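/*
 * Zero a physical page through the direct map.  dcache_zero_page()
 * presumably establishes and zeroes each cache line in turn (e.g. via
 * dcbz) without first reading the page from memory.
 */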
void
pmap_zero_page(paddr_t pa)
{
//	printf("%s(%#lx): calling dcache_zero_page(%#lx)\n", __func__, pa, pa);
	dcache_zero_page(pa);
}

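/*
 * Copy a page of physical memory through the direct map, one cache
 * line at a time: dcbt prefetches the next source line, dcba claims
 * the destination line without fetching it from memory, and each
 * lmw/stmw pair streams 32 bytes through r24-r31.
 */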
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const paddr_t end = src + PAGE_SIZE;

	while (src < end) {
		__asm(
			"dcbt	%2,%0"	"\n\t"	/* touch next src cacheline */
			"dcba	0,%1"	"\n\t"	/* don't fetch dst cacheline */
		    :: "b"(src), "b"(dst), "b"(line_size));
		for (u_int i = 0;
		     i < line_size;
		     src += 32, dst += 32, i += 32) {
			/* move 32 bytes (r24..r31) per lmw/stmw pair */
			__asm(
				"lmw	24,0(%0)" "\n\t"
				"stmw	24,0(%1)"
			    :: "b"(src), "b"(dst)
			    : "r24", "r25", "r26", "r27",
			      "r28", "r29", "r30", "r31");
		}
	}
}

void
pmap_md_init(void)
{

	/* nothing for now */
}

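/*
 * A virtual address is presumed to refer to device space when it lies
 * above the end of physical RAM yet outside the kernel's managed
 * virtual range, i.e. it can only be reached via an I/O TLB mapping.
 */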
bool
pmap_md_io_vaddr_p(vaddr_t va)
{
	return va >= pmap_limits.avail_end
	    && !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
}