/*	$NetBSD: pmap_pvt.c,v 1.2 2015/11/11 08:22:36 skrll Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pmap_pvt.c,v 1.2 2015/11/11 08:22:36 skrll Exp $");

#include <sys/kmem.h>
#include <sys/pserialize.h>

#include <uvm/uvm.h>
#include <uvm/pmap/pmap_pvt.h>

/*
 * unmanaged pv-tracked ranges
 *
 * This is a linear list for now because the only users are the DRM
 * graphics drivers, with a single tracked range per device, for the
 * graphics aperture, so there are expected to be few of them.
 *
 * This is used only after the VM system is initialized well enough
 * that we can use kmem_alloc.
 */

struct pv_track {
	paddr_t			pvt_start;
	psize_t			pvt_size;
	struct pv_track		*pvt_next;
	struct pmap_page	pvt_pages[];
};

static struct {
	kmutex_t		lock;
	pserialize_t		psz;
	struct pv_track		*list;
} pv_unmanaged __cacheline_aligned;

/*
 * pmap_pv_init()
 *
 *	Initialize the unmanaged pv-tracking state: the lock, the
 *	pserialize instance, and the empty list of tracked ranges.
 */
void
pmap_pv_init(void)
{

	mutex_init(&pv_unmanaged.lock, MUTEX_DEFAULT, IPL_VM);
	pv_unmanaged.psz = pserialize_create();
	pv_unmanaged.list = NULL;
}

/*
 * pmap_pv_track(start, size)
 *
 *	Start pv-tracking the unmanaged pages [start, start + size).
 *	The new entry is published to the list only after
 *	membar_producer(), so lockless readers in pmap_pv_tracked()
 *	never see a partially initialized record.
 */
void
pmap_pv_track(paddr_t start, psize_t size)
{
	struct pv_track *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	npages = size >> PAGE_SHIFT;
	pvt = kmem_zalloc(offsetof(struct pv_track, pvt_pages[npages]),
	    KM_SLEEP);
	pvt->pvt_start = start;
	pvt->pvt_size = size;

	mutex_enter(&pv_unmanaged.lock);
	pvt->pvt_next = pv_unmanaged.list;
	membar_producer();
	pv_unmanaged.list = pvt;
	mutex_exit(&pv_unmanaged.lock);
}

/*
 * pmap_pv_untrack(start, size)
 *
 *	Stop pv-tracking the pages [start, start + size), which must
 *	exactly match a prior pmap_pv_track() call.  After the entry
 *	is unlinked, pserialize_perform() waits for current readers to
 *	drain before the entry is freed.
 */
void
pmap_pv_untrack(paddr_t start, psize_t size)
{
	struct pv_track **pvtp, *pvt;
	size_t npages;

	KASSERT(start == trunc_page(start));
	KASSERT(size == trunc_page(size));

	mutex_enter(&pv_unmanaged.lock);
	for (pvtp = &pv_unmanaged.list;
	     (pvt = *pvtp) != NULL;
	     pvtp = &pvt->pvt_next) {
		if (pvt->pvt_start != start)
			continue;
		if (pvt->pvt_size != size)
			panic("pmap_pv_untrack: pv-tracking at 0x%"PRIxPADDR
			    ": 0x%"PRIxPSIZE" bytes, not 0x%"PRIxPSIZE" bytes",
			    pvt->pvt_start, pvt->pvt_size, size);
		*pvtp = pvt->pvt_next;
		pserialize_perform(pv_unmanaged.psz);
		pvt->pvt_next = NULL;
		goto out;
	}
	panic("pmap_pv_untrack: pages not pv-tracked at 0x%"PRIxPADDR
	    " (0x%"PRIxPSIZE" bytes)",
	    start, size);
out:	mutex_exit(&pv_unmanaged.lock);

	npages = size >> PAGE_SHIFT;
	kmem_free(pvt, offsetof(struct pv_track, pvt_pages[npages]));
}

/*
 * pmap_pv_tracked(pa)
 *
 *	Return the pmap_page for the unmanaged, pv-tracked page at pa,
 *	or NULL if pa lies in no tracked range.  Runs locklessly under
 *	pserialize; membar_datadep_consumer() pairs with the producer
 *	barrier in pmap_pv_track().
 */
struct pmap_page *
pmap_pv_tracked(paddr_t pa)
{
	struct pv_track *pvt;
	size_t pgno;
	int s;

	KASSERT(pa == trunc_page(pa));

	s = pserialize_read_enter();
	for (pvt = pv_unmanaged.list; pvt != NULL; pvt = pvt->pvt_next) {
		membar_datadep_consumer();
		if ((pvt->pvt_start <= pa) &&
		    ((pa - pvt->pvt_start) < pvt->pvt_size))
			break;
	}
	pserialize_read_exit(s);

	if (pvt == NULL)
		return NULL;
	KASSERT(pvt->pvt_start <= pa);
	KASSERT((pa - pvt->pvt_start) < pvt->pvt_size);
	pgno = (pa - pvt->pvt_start) >> PAGE_SHIFT;
	return &pvt->pvt_pages[pgno];
}
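
/*
 * Usage sketch (illustrative only, never compiled: the example_*
 * names and the PMAP_PVT_EXAMPLE guard are hypothetical and not part
 * of NetBSD).  A driver mapping an unmanaged device aperture would
 * register the page-aligned range once at attach time, look up
 * per-page state by physical address while it operates, and
 * unregister the identical range at detach time.
 */
#ifdef PMAP_PVT_EXAMPLE
static void
example_aperture_attach(paddr_t aperture_pa, psize_t aperture_size)
{

	/* Both arguments must be page-aligned. */
	pmap_pv_track(aperture_pa, aperture_size);
}

static struct pmap_page *
example_aperture_page(paddr_t pa)
{

	/* Returns NULL if pa falls in no tracked range. */
	return pmap_pv_tracked(trunc_page(pa));
}

static void
example_aperture_detach(paddr_t aperture_pa, psize_t aperture_size)
{

	/* Must match the exact range passed to pmap_pv_track(). */
	pmap_pv_untrack(aperture_pa, aperture_size);
}
#endif	/* PMAP_PVT_EXAMPLE */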