/*	$NetBSD: wired_map_machdep.c,v 1.9 2023/12/20 06:36:02 thorpej Exp $	*/

/*-
 * Copyright (C) 2000 Shuichiro URATA.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: wired_map_machdep.c,v 1.9 2023/12/20 06:36:02 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vmem_impl.h>

#include <uvm/uvm_extern.h>
#include <machine/cpu.h>
#include <machine/wired_map.h>
#include <machine/vmparam.h>
#include <mips/locore.h>
#include <mips/pte.h>

static bool arc_wired_map_paddr_entry(paddr_t pa, vaddr_t *vap,
    vsize_t *sizep);
static bool arc_wired_map_vaddr_entry(vaddr_t va, paddr_t *pap,
    vsize_t *sizep);

#define	ARC_WIRED_MAP_BTCOUNT	VMEM_EST_BTCOUNT(1, 8)

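/*
 * vmem arena tracking which parts of the wired-map virtual address
 * range (VM_MIN_WIRED_MAP_ADDRESS .. VM_MAX_WIRED_MAP_ADDRESS) are in
 * use.  The arena and its boundary tags live in static storage
 * (VM_PRIVTAGS), so no dynamic allocation is needed to set it up.
 */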
static vmem_t *arc_wired_map_arena;
static struct vmem arc_wired_map_arena_store;
static struct vmem_btag arc_wired_map_btag_store[ARC_WIRED_MAP_BTCOUNT];

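/*
 * Initialize the wired-map state: reset the count of wired TLB pages
 * and create the vmem arena covering the wired-map virtual address
 * range.
 */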
void
arc_init_wired_map(void)
{
	int error __diagused;

	mips3_nwired_page = 0;

	arc_wired_map_arena = vmem_init(&arc_wired_map_arena_store,
					"wired_map",	/* name */
					0,		/* addr */
					0,		/* size */
					1,		/* quantum */
					NULL,		/* importfn */
					NULL,		/* releasefn */
					NULL,		/* source */
					0,		/* qcache_max */
					VM_NOSLEEP | VM_PRIVTAGS,
					IPL_NONE);
	KASSERT(arc_wired_map_arena != NULL);

	vmem_add_bts(arc_wired_map_arena, arc_wired_map_btag_store,
	    ARC_WIRED_MAP_BTCOUNT);
	error = vmem_add(arc_wired_map_arena, VM_MIN_WIRED_MAP_ADDRESS,
	    VM_MAX_WIRED_MAP_ADDRESS - VM_MIN_WIRED_MAP_ADDRESS,
	    VM_NOSLEEP);
	KASSERT(error == 0);
}

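/*
 * Wire a single page of "pg_size" bytes at "va" (which must be
 * pg_size-aligned) to physical address "pa".  The virtual range is
 * reserved in the arena first so that later allocations cannot reuse
 * it.
 */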
void
arc_wired_enter_page(vaddr_t va, paddr_t pa, vaddr_t pg_size)
{
	int error;

	KASSERT((va & (pg_size - 1)) == 0);

	if (va < VM_MIN_WIRED_MAP_ADDRESS ||
	    va + pg_size > VM_MAX_WIRED_MAP_ADDRESS) {
#ifdef DIAGNOSTIC
		printf("arc_wired_enter_page: invalid va range.\n");
#endif
		return;
	}

	error = vmem_xalloc_addr(arc_wired_map_arena, va, pg_size, VM_NOSLEEP);
	if (error) {
#ifdef DIAGNOSTIC
		printf("arc_wired_enter_page: cannot allocate region.\n");
#endif
		return;
	}

	mips3_wired_enter_page(va, pa, pg_size);
}

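/*
 * Find the wired TLB entry mapping physical address "pa".  On success,
 * return true and set *vap to the wired virtual address of the page
 * containing "pa" and *sizep to that entry's page size.
 */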
static bool
arc_wired_map_paddr_entry(paddr_t pa, vaddr_t *vap, vsize_t *sizep)
{
	vsize_t size;
	int n;
	struct wired_map_entry *entry;

	n = mips3_nwired_page;
	for (entry = mips3_wired_map; --n >= 0; entry++) {
		size = MIPS3_PG_SIZE_MASK_TO_SIZE(entry->pgmask);
		if (entry->pa0 != 0 &&
		    pa >= entry->pa0 && pa < entry->pa0 + size) {
			*vap = entry->va;
			*sizep = size;
			return true;
		}
		if (entry->pa1 != 0 &&
		    pa >= entry->pa1 && pa < entry->pa1 + size) {
			*vap = entry->va + size;
			*sizep = size;
			return true;
		}
	}
	return false;
}

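/*
 * Reverse lookup: find the wired TLB entry mapping virtual address
 * "va" and return the physical base of the corresponding page in *pap
 * and the entry's page size in *sizep.
 */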
/* XXX: Using tlbp makes this easier... */
static bool
arc_wired_map_vaddr_entry(vaddr_t va, paddr_t *pap, vsize_t *sizep)
{
	vsize_t size;
	int n;
	struct wired_map_entry *entry;

	n = mips3_nwired_page;
	for (entry = mips3_wired_map; --n >= 0; entry++) {
		size = MIPS3_PG_SIZE_MASK_TO_SIZE(entry->pgmask);
		if (va >= entry->va && va < entry->va + size * 2) {
			paddr_t pa = (va < entry->va + size)
			    ? entry->pa0 : entry->pa1;

			if (pa != 0) {
				*pap = pa;
				*sizep = size;
				return true;
			}
		}
	}
	return false;
}

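/*
 * Check whether the physical range [pa, pa + size) is already wired
 * mapped contiguously.  Return the wired virtual address of "pa" if it
 * is, or 0 if it is not.
 */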
vaddr_t
arc_contiguously_wired_mapped(paddr_t pa, vsize_t size)
{
	paddr_t p;
	vaddr_t rva, va;
	vsize_t vsize, offset;

	if (!arc_wired_map_paddr_entry(pa, &rva, &vsize))
		return 0;	/* not wired mapped */
	/* XXX: same physical address may be wired mapped more than once */
	offset = (vsize_t)pa & (vsize - 1);
	pa -= offset;
	size += offset;
	va = rva;
	for (;;) {
		pa += vsize;
		va += vsize;
		size -= vsize;
		if (size <= 0)
			break;
		if (!arc_wired_map_vaddr_entry(va, &p, &vsize) || p != pa)
			return 0; /* not contiguously wired mapped */
	}
	return rva + offset;
}

/* Allocate new wired entries */
vaddr_t
arc_map_wired(paddr_t pa, vsize_t size)
{
	vmem_addr_t va;
	vsize_t off;
	int error;

	/* XXX: may be already partially wired mapped */

	off = pa & MIPS3_WIRED_OFFMASK;
	pa &= ~(paddr_t)MIPS3_WIRED_OFFMASK;
	size += off;

	if ((size + MIPS3_WIRED_ENTRY_SIZE(MIPS3_WIRED_SIZE) - 1) /
	    MIPS3_WIRED_ENTRY_SIZE(MIPS3_WIRED_SIZE) >
	    MIPS3_NWIRED_ENTRY - mips3_nwired_page) {
#ifdef DIAGNOSTIC
		printf("arc_map_wired(0x%"PRIxPADDR", 0x%"PRIxVSIZE"): %d is not enough\n",
		    pa + off, size - off,
		    MIPS3_NWIRED_ENTRY - mips3_nwired_page);
#endif
		return 0; /* not enough free wired TLB entries */
	}

	error = vmem_xalloc(arc_wired_map_arena, size,
			    MIPS3_WIRED_SIZE,		/* align */
			    0,				/* phase */
			    0,				/* nocross */
			    VMEM_ADDR_MIN,		/* minaddr */
			    VMEM_ADDR_MAX,		/* maxaddr */
			    VM_BESTFIT | VM_NOSLEEP,
			    &va);
	if (error) {
#ifdef DIAGNOSTIC
		printf("arc_map_wired: can't allocate region\n");
#endif
		return 0;
	}
	mips3_wired_enter_region(va, pa, MIPS3_WIRED_SIZE);

	return va + off;
}

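/*
 * Translate a wired-map virtual address to its physical address.
 * Return false if "va" is not covered by any wired entry.
 */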
bool
arc_wired_map_extract(vaddr_t va, paddr_t *pap)
{
	paddr_t pa;
	vsize_t size;

	if (arc_wired_map_vaddr_entry(va, &pa, &size)) {
		*pap = pa + (va & (size - 1));
		return true;
	} else {
		return false;
	}
}