/*	$NetBSD: linux_kmap.c,v 1.4 2014/03/28 23:22:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_kmap.c,v 1.4 2014/03/28 23:22:27 riastradh Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <uvm/uvm_extern.h>

#include <linux/highmem.h>

/*
 * XXX Kludgerific implementation of Linux kmap_atomic, which is
 * required not to fail.  To accommodate this, we reserve one page of
 * kva at boot (or load) and limit the system to at most one
 * kmap_atomic mapping in use at a time.
 */
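
/*
 * Because only that one page of kva is reserved, at most one atomic
 * mapping can exist system-wide; linux_kmap_atomic_lock, below, is
 * held from kmap_atomic until the matching kunmap_atomic to enforce
 * this.
 */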

/*
 * XXX Use direct-mapped physical pages where available, e.g. amd64.
 *
 * XXX ...or add an abstraction to uvm for this.  (uvm_emap?)
 */

static kmutex_t linux_kmap_atomic_lock;
static vaddr_t linux_kmap_atomic_vaddr;

static kmutex_t linux_kmap_lock;
static rb_tree_t linux_kmap_entries;

struct linux_kmap_entry {
	paddr_t		lke_paddr;
	vaddr_t		lke_vaddr;
	unsigned int	lke_refcnt;	/* XXX unused at present */
	rb_node_t	lke_node;
};

static int
lke_compare_nodes(void *ctx __unused, const void *an, const void *bn)
{
	const struct linux_kmap_entry *const a = an;
	const struct linux_kmap_entry *const b = bn;

	if (a->lke_paddr < b->lke_paddr)
		return -1;
	else if (a->lke_paddr > b->lke_paddr)
		return +1;
	else
		return 0;
}

static int
lke_compare_key(void *ctx __unused, const void *node, const void *key)
{
	const struct linux_kmap_entry *const lke = node;
	const paddr_t *const paddrp = key;

	if (lke->lke_paddr < *paddrp)
		return -1;
	else if (lke->lke_paddr > *paddrp)
		return +1;
	else
		return 0;
}

static const rb_tree_ops_t linux_kmap_entry_ops = {
	.rbto_compare_nodes = &lke_compare_nodes,
	.rbto_compare_key = &lke_compare_key,
	.rbto_node_offset = offsetof(struct linux_kmap_entry, lke_node),
	.rbto_context = NULL,
};
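
/*
 * Lookup sketch (hypothetical caller, for illustration): with
 * lke_compare_key above, an entry is found by passing a pointer to
 * the physical address as the key:
 *
 *	struct linux_kmap_entry *const lke =
 *	    rb_tree_find_node(&linux_kmap_entries, &paddr);
 *
 * This is exactly how kunmap, below, recovers the kva it must unmap.
 */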

int
linux_kmap_init(void)
{

	/* IPL_VM is needed to block pmap_kenter_pa.  */
	mutex_init(&linux_kmap_atomic_lock, MUTEX_DEFAULT, IPL_VM);

	linux_kmap_atomic_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	mutex_init(&linux_kmap_lock, MUTEX_DEFAULT, IPL_VM);
	rb_tree_init(&linux_kmap_entries, &linux_kmap_entry_ops);

	return 0;
}

void
linux_kmap_fini(void)
{

	KASSERT(rb_tree_iterate(&linux_kmap_entries, NULL, RB_DIR_RIGHT) ==
	    NULL);
#if 0				/* XXX no rb_tree_destroy */
	rb_tree_destroy(&linux_kmap_entries);
#endif
	mutex_destroy(&linux_kmap_lock);

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	/* UVM_KMF_WAITVA is an allocation-time flag; free takes the
	 * mapping type only, as in kunmap below.  */
	uvm_km_free(kernel_map, linux_kmap_atomic_vaddr, PAGE_SIZE,
	    UVM_KMF_VAONLY);

	mutex_destroy(&linux_kmap_atomic_lock);
}

void *
kmap_atomic(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);

	mutex_spin_enter(&linux_kmap_atomic_lock);

	KASSERT(linux_kmap_atomic_vaddr != 0);
	KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

	const vaddr_t vaddr = linux_kmap_atomic_vaddr;
	const int prot = (VM_PROT_READ | VM_PROT_WRITE);
	const int flags = 0;
	pmap_kenter_pa(vaddr, paddr, prot, flags);
	pmap_update(pmap_kernel());

	/* Return with the spin lock held; kunmap_atomic releases it.  */
	return (void *)vaddr;
}

void
kunmap_atomic(void *addr)
{
	const vaddr_t vaddr = (vaddr_t)addr;

	KASSERT(mutex_owned(&linux_kmap_atomic_lock));
	KASSERT(linux_kmap_atomic_vaddr == vaddr);
	KASSERT(pmap_extract(pmap_kernel(), vaddr, NULL));

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	mutex_spin_exit(&linux_kmap_atomic_lock);
}
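
/*
 * Usage sketch (hypothetical caller, for illustration only).  Because
 * the spin lock is held from kmap_atomic until kunmap_atomic, the
 * caller must neither sleep nor nest another kmap_atomic in between:
 *
 *	char *const kva = kmap_atomic(page);
 *	memcpy(buf, kva + off, len);
 *	kunmap_atomic(kva);
 */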

void *
kmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
	const vaddr_t vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    (UVM_KMF_VAONLY | UVM_KMF_WAITVA));
	KASSERT(vaddr != 0);

	struct linux_kmap_entry *const lke = kmem_alloc(sizeof(*lke),
	    KM_SLEEP);
	lke->lke_paddr = paddr;
	lke->lke_vaddr = vaddr;

	mutex_spin_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const collision __unused =
	    rb_tree_insert_node(&linux_kmap_entries, lke);
	KASSERT(collision == lke);	/* no duplicate paddr entries */
	mutex_spin_exit(&linux_kmap_lock);

	KASSERT(!pmap_extract(pmap_kernel(), vaddr, NULL));
	const int prot = (VM_PROT_READ | VM_PROT_WRITE);
	const int flags = 0;
	pmap_kenter_pa(vaddr, paddr, prot, flags);
	pmap_update(pmap_kernel());

	return (void *)vaddr;
}

void
kunmap(struct page *page)
{
	const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);

	mutex_spin_enter(&linux_kmap_lock);
	struct linux_kmap_entry *const lke =
	    rb_tree_find_node(&linux_kmap_entries, &paddr);
	KASSERT(lke != NULL);
	rb_tree_remove_node(&linux_kmap_entries, lke);
	mutex_spin_exit(&linux_kmap_lock);

	const vaddr_t vaddr = lke->lke_vaddr;
	kmem_free(lke, sizeof(*lke));

	KASSERT(pmap_extract(pmap_kernel(), vaddr, NULL));

	pmap_kremove(vaddr, PAGE_SIZE);
	pmap_update(pmap_kernel());

	uvm_km_free(kernel_map, vaddr, PAGE_SIZE, UVM_KMF_VAONLY);
}
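
/*
 * Usage sketch (hypothetical caller, for illustration only).  Unlike
 * the atomic variant, kmap may sleep (KM_SLEEP, UVM_KMF_WAITVA), so it
 * may be used only in thread context; in exchange, any number of pages
 * can be mapped at once and unmapped in any order:
 *
 *	void *const va0 = kmap(page0);
 *	void *const va1 = kmap(page1);
 *	memcpy(va1, va0, PAGE_SIZE);
 *	kunmap(page0);
 *	kunmap(page1);
 */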