/*	$NetBSD: pmap_pvt.c,v 1.11 2021/07/21 06:35:45 skrll Exp $	*/

/*-
 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pmap_pvt.c,v 1.11 2021/07/21 06:35:45 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/pserialize.h>

#include <uvm/uvm.h>
#include <uvm/pmap/pmap_pvt.h>

/*
 * unmanaged pv-tracked ranges
 *
 * This is a linear list for now because the only users are the DRM
 * graphics drivers, each with a single tracked range per device (the
 * graphics aperture), so only a few entries are expected.
 *
 * This is used only after the VM system is initialized well enough
 * that we can use kmem_alloc.
 */
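
/*
 * Example (an illustrative sketch, not code from any real driver):
 * a graphics driver with a page-aligned physical aperture would
 * register the range once at attach time and unregister it at
 * detach time.  The softc layout and names here are hypothetical:
 *
 *	struct hypothetical_softc {
 *		paddr_t		sc_aperture_base;
 *		psize_t		sc_aperture_size;
 *	};
 *
 *	static void
 *	hypothetical_attach(struct hypothetical_softc *sc)
 *	{
 *		pmap_pv_track(sc->sc_aperture_base, sc->sc_aperture_size);
 *	}
 *
 *	static void
 *	hypothetical_detach(struct hypothetical_softc *sc)
 *	{
 *		pmap_pv_untrack(sc->sc_aperture_base, sc->sc_aperture_size);
 *	}
 */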

struct pv_track {
	paddr_t			pvt_start;
	psize_t			pvt_size;
	struct pv_track		*pvt_next;
	struct pmap_page	pvt_pages[];
};

static struct {
	kmutex_t	lock;
	pserialize_t	psz;
	struct pv_track	*list;
} pv_unmanaged __cacheline_aligned;

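/*
 * pmap_pv_init()
 *
 *	Initialize the unmanaged pv-tracking state: the writers' lock,
 *	the pserialize instance for readers, and the empty range list.
 *	Must run before any other pmap_pv_* routine is used.
 */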
void
pmap_pv_init(void)
{

	mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_NONE);
	pv_unmanaged.psz = pserialize_create();
	pv_unmanaged.list = NULL;
}

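/*
 * pmap_pv_track(start, size)
 *
 *	Begin pv-tracking the unmanaged pages in the physical range
 *	[start, start + size).  Both start and size must be
 *	page-aligned.  May sleep to allocate memory.
 */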
void
pmap_pv_track(paddr_t start, psize_t size)
{
	struct pv_track *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for allocation.  */
	ASSERT_SLEEPABLE();

	npages = size >> PAGE_SHIFT;
	pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
	    KM_SLEEP);
	pvt->pvt_start = start;
	pvt->pvt_size = size;

#ifdef PMAP_PAGE_INIT
	for (size_t i = 0; i < npages; i++)
		PMAP_PAGE_INIT(&pvt->pvt_pages[i]);
#endif

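	/*
	 * Publish the new range.  The mutex serializes writers, and
	 * the release ordering guarantees that readers who load the
	 * new list head see pvt's initialized contents.
	 */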
	mutex_enter(&pv_unmanaged.lock);
	pvt->pvt_next = pv_unmanaged.list;
	atomic_store_release(&pv_unmanaged.list, pvt);
	mutex_exit(&pv_unmanaged.lock);
}

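/*
 * pmap_pv_untrack(start, size)
 *
 *	Stop pv-tracking the page-aligned range [start, start + size),
 *	which must have been passed to pmap_pv_track earlier with
 *	exactly the same size; panics otherwise.  May sleep to wait
 *	for readers to drain.
 */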
void
pmap_pv_untrack(paddr_t start, psize_t size)
{
	struct pv_track **pvtp, *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for pserialize_perform.  */
	ASSERT_SLEEPABLE();

	mutex_enter(&pv_unmanaged.lock);
	for (pvtp = &pv_unmanaged.list;
	     (pvt = *pvtp) != NULL;
	     pvtp = &pvt->pvt_next) {
		if (pvt->pvt_start != start)
			continue;
		if (pvt->pvt_size != size)
			panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
			    ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
			    pvt->pvt_start, pvt->pvt_size, size);

		/*
		 * Remove from list.  Readers can safely see the old
		 * and new states of the list.
		 */
		atomic_store_relaxed(pvtp, pvt->pvt_next);

		/* Wait for readers who can see the old state to finish.  */
		pserialize_perform(pv_unmanaged.psz);

		/*
		 * We now have exclusive access to pvt and can destroy
		 * it.  Poison it to catch bugs.
		 */
		explicit_memset(&pvt->pvt_next, 0x1a, sizeof pvt->pvt_next);
		goto out;
	}
	panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
	    " (0x%"PRIxPSIZE" bytes)",
	    start, size);
out:	mutex_exit(&pv_unmanaged.lock);

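	/*
	 * No reader can reach pvt any more, so it is safe to free it
	 * outside the lock.
	 */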
	npages = size >> PAGE_SHIFT;
	kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
}

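/*
 * pmap_pv_tracked(pa)
 *
 *	If the page-aligned physical address pa lies in a pv-tracked
 *	range, return its pmap_page; otherwise return NULL.  Runs in
 *	a pserialize read section, so it never blocks on the writers'
 *	lock.
 */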
struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{
	struct pv_track *pvt;
	size_t pgno;
	int s;

	KASSERT(pa == trunc_page(pa));

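	/*
	 * Walk the list in a pserialize read section, so that a
	 * concurrent pmap_pv_untrack cannot free an entry while we
	 * are looking at it.
	 */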
	s = pserialize_read_enter();
	for (pvt = atomic_load_consume(&pv_unmanaged.list);
	     pvt != NULL;
	     pvt = pvt->pvt_next) {
		if ((pvt->pvt_start <= pa) &&
		    ((pa - pvt->pvt_start) < pvt->pvt_size))
			break;
	}
	pserialize_read_exit(s);

	if (pvt == NULL)
		return NULL;
	KASSERT(pvt->pvt_start <= pa);
	KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
	pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
	return &pvt->pvt_pages[pgno];
}
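
/*
 * Example lookup (an illustrative sketch under assumed names, not
 * code from any particular pmap): machine-dependent code that needs
 * the per-page state of an unmanaged physical address can combine
 * pmap_pv_tracked with a hypothetical helper handle_pmap_page:
 *
 *	void
 *	hypothetical_example(paddr_t pa)
 *	{
 *		struct pmap_page *pp;
 *
 *		pp = pmap_pv_tracked(trunc_page(pa));
 *		if (pp != NULL) {
 *			// pa is pv-tracked; pp stays valid as long
 *			// as the range remains tracked.
 *			handle_pmap_page(pp);
 *		}
 *	}
 */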