/*	$NetBSD: pmap_pvt.c,v 1.15 2022/05/08 22:03:02 rin Exp $	*/

/*-
 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pmap_pvt.c,v 1.15 2022/05/08 22:03:02 rin Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/pserialize.h>

#include <uvm/uvm.h>
#include <uvm/pmap/pmap_pvt.h>

#if !defined(PMAP_PV_TRACK_ONLY_STUBS)
/*
 * unmanaged pv-tracked ranges
 *
 * This is a linear list for now because the only users are the DRM
 * graphics drivers, with a single tracked range per device, for the
 * graphics aperture, so there are expected to be few of them.
 *
 * This is used only after the VM system is initialized well enough
 * that we can use kmem_alloc.
 */

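/*
 * A sketch of the intended call pattern, for orientation only; the
 * names aperture_pa, aperture_size, pa, and pp are illustrative,
 * not real symbols:
 *
 *	pmap_pv_track(aperture_pa, aperture_size);	(driver attach)
 *	pp = pmap_pv_tracked(pa);			(per-page lookup)
 *	pmap_pv_untrack(aperture_pa, aperture_size);	(driver detach)
 */
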
struct pv_track {
	paddr_t			pvt_start;
	psize_t			pvt_size;
	struct pv_track		*pvt_next;
	struct pmap_page	pvt_pages[];
};

static struct {
	kmutex_t	lock;
	pserialize_t	psz;
	struct pv_track	*list;
} pv_unmanaged __cacheline_aligned;

void
pmap_pv_init(void)
{

	mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_NONE);
	pv_unmanaged.psz = pserialize_create();
	pv_unmanaged.list = NULL;
}

void
pmap_pv_track(paddr_t start, psize_t size)
{
	struct pv_track *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for allocation.  */
	ASSERT_SLEEPABLE();

	npages = size >> PAGE_SHIFT;
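	/*
	 * One allocation covers the header and npages trailing struct
	 * pmap_page entries; offsetof applied to the flexible array
	 * member gives the total size.
	 */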
	pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
	    KM_SLEEP);
	pvt->pvt_start = start;
	pvt->pvt_size = size;

#ifdef PMAP_PAGE_INIT
	for (size_t i = 0; i < npages; i++)
		PMAP_PAGE_INIT(&pvt->pvt_pages[i]);
#endif

	mutex_enter(&pv_unmanaged.lock);
	pvt->pvt_next = pv_unmanaged.list;
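	/*
	 * Publish the new entry with release ordering so that lockless
	 * readers in pmap_pv_tracked, which load the list head with
	 * atomic_load_consume, cannot observe it before its contents
	 * are initialized.
	 */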
	atomic_store_release(&pv_unmanaged.list, pvt);
	mutex_exit(&pv_unmanaged.lock);
}

void
pmap_pv_untrack(paddr_t start, psize_t size)
{
	struct pv_track **pvtp, *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for pserialize_perform.  */
	ASSERT_SLEEPABLE();

	mutex_enter(&pv_unmanaged.lock);
	for (pvtp = &pv_unmanaged.list;
	     (pvt = *pvtp) != NULL;
	     pvtp = &pvt->pvt_next) {
		if (pvt->pvt_start != start)
			continue;
		if (pvt->pvt_size != size)
			panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
			    ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
			    pvt->pvt_start, pvt->pvt_size, size);

		/*
		 * Remove from list.  Readers can safely see the old
		 * and new states of the list.
		 */
		atomic_store_relaxed(pvtp, pvt->pvt_next);

		/* Wait for readers who can see the old state to finish.  */
		pserialize_perform(pv_unmanaged.psz);

		/*
		 * We now have exclusive access to pvt and can destroy
		 * it.  Poison it to catch bugs.
		 */
		explicit_memset(&pvt->pvt_next, 0x1a, sizeof pvt->pvt_next);
		goto out;
	}
	panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
	    " (0x%"PRIxPSIZE" bytes)",
	    start, size);
out:	mutex_exit(&pv_unmanaged.lock);

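	/*
	 * The entry is now unreachable and all readers have drained,
	 * so it can be freed without holding the lock.
	 */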
	npages = size >> PAGE_SHIFT;
	kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
}

struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{
	struct pv_track *pvt;
	size_t pgno;
	int s;

	KASSERT(pa == trunc_page(pa));

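	/*
	 * Lockless lookup.  The read section pairs with
	 * pserialize_perform in pmap_pv_untrack, and the consume load
	 * of the list head pairs with the release store in
	 * pmap_pv_track.
	 */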
	s = pserialize_read_enter();
	for (pvt = atomic_load_consume(&pv_unmanaged.list);
	     pvt != NULL;
	     pvt = pvt->pvt_next) {
		if ((pvt->pvt_start <= pa) &&
		    ((pa - pvt->pvt_start) < pvt->pvt_size))
			break;
	}
	pserialize_read_exit(s);

	if (pvt == NULL)
		return NULL;
	KASSERT(pvt->pvt_start <= pa);
	KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
	pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
	return &pvt->pvt_pages[pgno];
}

#else /* PMAP_PV_TRACK_ONLY_STUBS */
/*
 * Provide empty stubs just for MODULAR kernels.
 */

void
pmap_pv_init(void)
{

}

struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{

	return NULL;
}

#if notdef
/*
 * pmap_pv_{,un}track() are intentionally commented out.  If modules
 * called these functions, the result would be an inconsistent state.
 *
 * Such modules require real PV-tracking support.  Leave the two
 * symbols undefined so that such modules cannot be loaded at all.
 */
void
pmap_pv_track(paddr_t start, psize_t size)
{

	panic("PV-tracking not supported");
}

void
pmap_pv_untrack(paddr_t start, psize_t size)
{

	panic("PV-tracking not supported");
}
#endif /* notdef */

#endif /* PMAP_PV_TRACK_ONLY_STUBS */