xref: /netbsd-src/sys/uvm/pmap/pmap_pvt.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: pmap_pvt.c,v 1.3 2016/02/07 18:41:25 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2014 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __RCSID("$NetBSD: pmap_pvt.c,v 1.3 2016/02/07 18:41:25 riastradh Exp $");
34 
35 #include <sys/kmem.h>
36 #include <sys/pserialize.h>
37 
38 #include <uvm/uvm.h>
39 #include <uvm/pmap/pmap_pvt.h>
40 
41 /*
42  * unmanaged pv-tracked ranges
43  *
44  * This is a linear list for now because the only users are the DRM
45  * graphics drivers, with a single tracked range per device, for the
46  * graphics aperture, so there are expected to be few of them.
47  *
48  * This is used only after the VM system is initialized well enough
49  * that we can use kmem_alloc.
50  */
51 
/*
 * One unmanaged pv-tracked physical range.  Allocated with trailing
 * space for one struct pmap_page per page in the range (flexible
 * array member); see pmap_pv_track for the allocation size.
 */
struct pv_track {
	paddr_t			pvt_start;	/* first physical address */
	psize_t			pvt_size;	/* size in bytes, page-aligned */
	struct pv_track		*pvt_next;	/* next on pv_unmanaged.list */
	struct pmap_page	pvt_pages[];	/* one per page in the range */
};
58 
/*
 * Global state for unmanaged pv-tracked ranges: a singly-linked list
 * of pv_track records.  Writers (track/untrack) serialize with the
 * mutex; readers (pmap_pv_tracked) run lock-free under pserialize.
 */
static struct {
	kmutex_t	lock;	/* serializes list insert/remove */
	pserialize_t	psz;	/* drains lock-free readers on untrack */
	struct pv_track	*list;	/* head of the pv_track list */
} pv_unmanaged __cacheline_aligned;
64 
65 void
66 pmap_pv_init(void)
67 {
68 
69 	mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_NONE);
70 	pv_unmanaged.psz = pserialize_create();
71 	pv_unmanaged.list = NULL;
72 }
73 
74 void
75 pmap_pv_track(paddr_t start, psize_t size)
76 {
77 	struct pv_track *pvt;
78 	size_t npages;
79 
80 	KASSERT(start == trunc_page(start));
81 	KASSERT(size == trunc_page(size));
82 
83 	/* We may sleep for allocation.  */
84 	ASSERT_SLEEPABLE();
85 
86 	npages = size >> PAGE_SHIFT;
87 	pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
88 	    KM_SLEEP);
89 	pvt->pvt_start = start;
90 	pvt->pvt_size = size;
91 
92 	mutex_enter(&pv_unmanaged.lock);
93 	pvt->pvt_next = pv_unmanaged.list;
94 	membar_producer();
95 	pv_unmanaged.list = pvt;
96 	mutex_exit(&pv_unmanaged.lock);
97 }
98 
/*
 * pmap_pv_untrack: Stop pv-tracking the physical range
 * [start, start + size).  The range must previously have been passed
 * to pmap_pv_track with exactly the same start and size; otherwise we
 * panic.  May sleep (pserialize_perform blocks until all current
 * lock-free readers have drained).
 */
void
pmap_pv_untrack(paddr_t start, psize_t size)
{
	struct pv_track **pvtp, *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	/* We may sleep for pserialize_perform.  */
	ASSERT_SLEEPABLE();

	mutex_enter(&pv_unmanaged.lock);
	/* Walk with a pointer-to-link so we can unlink in place.  */
	for (pvtp = &pv_unmanaged.list;
	     (pvt = *pvtp) != NULL;
	     pvtp = &pvt->pvt_next) {
		if (pvt->pvt_start != start)
			continue;
		/* Partial untrack is not supported: sizes must match.  */
		if (pvt->pvt_size != size)
			panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
			    ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
			    pvt->pvt_start, pvt->pvt_size, size);
		/*
		 * Unlink the record, then wait for every lock-free
		 * reader that may still hold a pointer to it before we
		 * tear it down.
		 */
		*pvtp = pvt->pvt_next;
		pserialize_perform(pv_unmanaged.psz);
		pvt->pvt_next = NULL;
		goto out;
	}
	panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
	    " (0x%"PRIxPSIZE" bytes)",
	    start, size);
out:	mutex_exit(&pv_unmanaged.lock);

	/* Free outside the lock; size must match the original allocation.  */
	npages = size >> PAGE_SHIFT;
	kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
}
134 
/*
 * pmap_pv_tracked: If the page-aligned physical address pa falls
 * inside a pv-tracked range, return a pointer to its pmap_page;
 * otherwise return NULL.  The lookup itself is lock-free, protected
 * only by a pserialize read section.
 */
struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{
	struct pv_track *pvt;
	size_t pgno;
	int s;

	KASSERT(pa == trunc_page(pa));

	s = pserialize_read_enter();
	for (pvt = pv_unmanaged.list; pvt != NULL; pvt = pvt->pvt_next) {
		/*
		 * Pairs with membar_producer in pmap_pv_track: ensure
		 * pvt's fields are seen initialized before we read
		 * them through the just-loaded pointer.
		 */
		membar_datadep_consumer();
		if ((pvt->pvt_start <= pa) &&
		    ((pa - pvt->pvt_start) < pvt->pvt_size))
			break;
	}
	pserialize_read_exit(s);

	/*
	 * NOTE(review): pvt is dereferenced after the read section has
	 * ended; presumably callers guarantee the range is not
	 * concurrently untracked while the result is in use -- confirm
	 * against the pmap_pv_untrack callers.
	 */
	if (pvt == NULL)
		return NULL;
	KASSERT(pvt->pvt_start <= pa);
	KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
	pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
	return &pvt->pvt_pages[pgno];
}
160 
161