/*	$NetBSD: linux_kmap.c,v 1.16 2018/08/27 15:28:53 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_kmap.c,v 1.16 2018/08/27 15:28:53 riastradh Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>
#include <sys/sdt.h>

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
#include <dev/mm.h>
#endif

#include <uvm/uvm_extern.h>

#include <linux/highmem.h>

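/*
 * Statically-defined tracing (SDT) probes, fired below whenever a page
 * is mapped or unmapped so kmap traffic can be observed with dtrace(1).
 */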
SDT_PROBE_DEFINE2(sdt, linux, kmap, map,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, unmap,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, map__atomic,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);
SDT_PROBE_DEFINE2(sdt, linux, kmap, unmap__atomic,
    "paddr_t"/*paddr*/, "vaddr_t"/*vaddr*/);

/*
 * XXX Kludgerific implementation of Linux kmap_atomic, which is
 * required not to fail.  To accommodate this, we reserve one page of
 * kva at boot (or load) and limit the system to at most one
 * kmap_atomic mapping in use at a time.
 */

static kmutex_t linux_kmap_atomic_lock;
static vaddr_t linux_kmap_atomic_vaddr;

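/*
 * Bookkeeping for the sleepable kmap/kunmap path: a tree of the active
 * mappings, keyed by physical address and serialized by an adaptive
 * mutex.
 */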
static kmutex_t linux_kmap_lock;
static rb_tree_t linux_kmap_entries;

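/*
 * One node per active kmap mapping: the physical address being mapped
 * and the kva chosen for it.  (lke_refcnt is not referenced anywhere
 * in this file.)
 */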
struct linux_kmap_entry {
        paddr_t         lke_paddr;
        vaddr_t         lke_vaddr;
        unsigned int    lke_refcnt;
        rb_node_t       lke_node;
};

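/*
 * rbtree comparator: order two kmap entries by physical address.
 */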
static int
lke_compare_nodes(void *ctx __unused, const void *an, const void *bn)
{
        const struct linux_kmap_entry *const a = an;
        const struct linux_kmap_entry *const b = bn;

        if (a->lke_paddr < b->lke_paddr)
                return -1;
        else if (a->lke_paddr > b->lke_paddr)
                return +1;
        else
                return 0;
}

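/*
 * rbtree comparator: order a kmap entry against a bare paddr_t key.
 */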
static int
lke_compare_key(void *ctx __unused, const void *node, const void *key)
{
        const struct linux_kmap_entry *const lke = node;
        const paddr_t *const paddrp = key;

        if (lke->lke_paddr < *paddrp)
                return -1;
        else if (lke->lke_paddr > *paddrp)
                return +1;
        else
                return 0;
}

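/*
 * rbtree ops glue for the tree of active kmap entries.
 */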
static const rb_tree_ops_t linux_kmap_entry_ops = {
        .rbto_compare_nodes = &lke_compare_nodes,
        .rbto_compare_key = &lke_compare_key,
        .rbto_node_offset = offsetof(struct linux_kmap_entry, lke_node),
        .rbto_context = NULL,
};

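/*
 * linux_kmap_init()
 *
 *      Reserve the single page of kva used by kmap_atomic and set up
 *      the locks and the tree of kmap entries.  Always returns 0.
 *      Expected to run once before the first kmap or kmap_atomic call
 *      (presumably from the module init path; not verified here).
 */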
int
linux_kmap_init(void)
{

        /* IPL_VM since interrupt handlers use kmap_atomic.  */
        mutex_init(&linux_kmap_atomic_lock, MUTEX_DEFAULT, IPL_VM);

        linux_kmap_atomic_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
            (UVM_KMF_VAONLY | UVM_KMF_WAITVA));

        KASSERT(linux_kmap_atomic_vaddr != 0);
        KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

        mutex_init(&linux_kmap_lock, MUTEX_DEFAULT, IPL_NONE);
        rb_tree_init(&linux_kmap_entries, &linux_kmap_entry_ops);

        return 0;
}

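/*
 * linux_kmap_fini()
 *
 *      Tear down what linux_kmap_init set up.  All kmap mappings must
 *      already have been kunmapped, and no kmap_atomic mapping may be
 *      outstanding.
 */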
void
linux_kmap_fini(void)
{

        KASSERT(RB_TREE_MIN(&linux_kmap_entries) == NULL);
#if 0                           /* XXX no rb_tree_destroy */
        rb_tree_destroy(&linux_kmap_entries);
#endif
        mutex_destroy(&linux_kmap_lock);

        KASSERT(linux_kmap_atomic_vaddr != 0);
        KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));

        uvm_km_free(kernel_map, linux_kmap_atomic_vaddr, PAGE_SIZE,
            (UVM_KMF_VAONLY | UVM_KMF_WAITVA));

        mutex_destroy(&linux_kmap_atomic_lock);
}

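/*
 * kmap_atomic(page)
 *
 *      Map page into kva and return the virtual address.  May be used
 *      in interrupt context and must not fail.  If the architecture
 *      provides a direct map, that address is returned outright;
 *      otherwise the single reserved page of kva is used, so at most
 *      one such mapping can be live at a time and the caller must pair
 *      it promptly with kunmap_atomic.  Hypothetical usage sketch:
 *
 *              void *va = kmap_atomic(page);
 *              ...access up to PAGE_SIZE bytes at va...
 *              kunmap_atomic(va);
 */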
void *
kmap_atomic(struct page *page)
{
        const paddr_t paddr = uvm_vm_page_to_phys(&page->p_vmp);
        vaddr_t vaddr;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
        if (mm_md_direct_mapped_phys(paddr, &vaddr))
                goto out;
#endif

        mutex_spin_enter(&linux_kmap_atomic_lock);
        KASSERT(linux_kmap_atomic_vaddr != 0);
        KASSERT(!pmap_extract(pmap_kernel(), linux_kmap_atomic_vaddr, NULL));
        vaddr = linux_kmap_atomic_vaddr;
        pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
        pmap_update(pmap_kernel());

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
out:
#endif
        SDT_PROBE2(sdt, linux, kmap, map__atomic, paddr, vaddr);
        return (void *)vaddr;
}

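/*
 * kunmap_atomic(addr)
 *
 *      Undo kmap_atomic.  If addr came from the direct map, no
 *      teardown is needed; otherwise the reserved page is unmapped and
 *      the spin lock taken in kmap_atomic is released.
 */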
void
kunmap_atomic(void *addr)
{
        const vaddr_t vaddr = (vaddr_t)addr;
        paddr_t paddr;
        bool ok __diagused;

        ok = pmap_extract(pmap_kernel(), vaddr, &paddr);
        KASSERT(ok);

        SDT_PROBE2(sdt, linux, kmap, unmap__atomic, paddr, vaddr);

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
        {
                vaddr_t vaddr1;
                if (mm_md_direct_mapped_phys(paddr, &vaddr1) && vaddr1 == vaddr)
                        return;
        }
#endif

        KASSERT(mutex_owned(&linux_kmap_atomic_lock));
        KASSERT(linux_kmap_atomic_vaddr == vaddr);

        pmap_kremove(vaddr, PAGE_SIZE);
        pmap_update(pmap_kernel());

        mutex_spin_exit(&linux_kmap_atomic_lock);
}

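/*
 * kmap(page)
 *
 *      Sleepable variant of kmap_atomic: map page into kva and return
 *      the virtual address.  When there is no direct map, fresh kva is
 *      allocated and the mapping is recorded in the tree so kunmap can
 *      find it by physical address later.  May sleep; the mapping
 *      persists until kunmap(page).
 */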
void *
kmap(struct page *page)
{
        const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
        vaddr_t vaddr;

        ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
        if (mm_md_direct_mapped_phys(paddr, &vaddr))
                goto out;
#endif

        vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
            (UVM_KMF_VAONLY | UVM_KMF_WAITVA));
        KASSERT(vaddr != 0);

        struct linux_kmap_entry *const lke = kmem_alloc(sizeof(*lke),
            KM_SLEEP);
        lke->lke_paddr = paddr;
        lke->lke_vaddr = vaddr;

        mutex_enter(&linux_kmap_lock);
        struct linux_kmap_entry *const collision __diagused =
            rb_tree_insert_node(&linux_kmap_entries, lke);
        KASSERT(collision == lke);
        mutex_exit(&linux_kmap_lock);

        KASSERT(!pmap_extract(pmap_kernel(), vaddr, NULL));
        pmap_kenter_pa(vaddr, paddr, (VM_PROT_READ | VM_PROT_WRITE), 0);
        pmap_update(pmap_kernel());

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
out:
#endif
        SDT_PROBE2(sdt, linux, kmap, map, paddr, vaddr);
        return (void *)vaddr;
}

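/*
 * kunmap(page)
 *
 *      Undo kmap(page): look up the mapping by the page's physical
 *      address, remove it from the tree, and release the kva.  A
 *      direct-mapped page needs no teardown beyond firing the probe.
 */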
void
kunmap(struct page *page)
{
        const paddr_t paddr = VM_PAGE_TO_PHYS(&page->p_vmp);
        vaddr_t vaddr;

        ASSERT_SLEEPABLE();

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
        if (mm_md_direct_mapped_phys(paddr, &vaddr))
                goto out;
#endif

        mutex_enter(&linux_kmap_lock);
        struct linux_kmap_entry *const lke =
            rb_tree_find_node(&linux_kmap_entries, &paddr);
        KASSERT(lke != NULL);
        rb_tree_remove_node(&linux_kmap_entries, lke);
        mutex_exit(&linux_kmap_lock);

        vaddr = lke->lke_vaddr;
        kmem_free(lke, sizeof(*lke));

        KASSERT(pmap_extract(pmap_kernel(), vaddr, NULL));

        pmap_kremove(vaddr, PAGE_SIZE);
        pmap_update(pmap_kernel());

        uvm_km_free(kernel_map, vaddr, PAGE_SIZE, UVM_KMF_VAONLY);

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
out:
#endif
        SDT_PROBE2(sdt, linux, kmap, unmap, paddr, vaddr);
}