/* $NetBSD: bus_dma_hacks.h,v 1.20 2020/02/20 09:07:39 mrg Exp $ */

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _DRM_BUS_DMA_HACKS_H_
#define _DRM_BUS_DMA_HACKS_H_

#include <sys/cdefs.h>
#include <sys/bus.h>
#include <sys/kmem.h>
#include <sys/queue.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

/*
 * PHYS_TO_BUS_MEM() and BUS_MEM_TO_PHYS() convert between CPU physical
 * addresses and bus memory addresses under a given DMA tag.  On most of
 * the platforms handled here the two address spaces coincide; on arm the
 * tag's DMA ranges are consulted.
 */
#if defined(__i386__) || defined(__x86_64__)
# include <x86/bus_private.h>
# include <x86/machdep.h>
# define PHYS_TO_BUS_MEM(dmat, paddr)	((bus_addr_t)(paddr))
# define BUS_MEM_TO_PHYS(dmat, baddr)	((paddr_t)(baddr))
#elif defined(__arm__) || defined(__aarch64__)
static inline bus_addr_t
PHYS_TO_BUS_MEM(bus_dma_tag_t dmat, paddr_t pa)
{
        unsigned i;

        if (dmat->_nranges == 0)
                return (bus_addr_t)pa;

        for (i = 0; i < dmat->_nranges; i++) {
                const struct arm32_dma_range *dr = &dmat->_ranges[i];

                if (dr->dr_sysbase <= pa && pa - dr->dr_sysbase <= dr->dr_len)
                        return pa - dr->dr_sysbase + dr->dr_busbase;
        }
        panic("paddr has no bus address in dma tag %p: %"PRIxPADDR, dmat, pa);
}
static inline paddr_t
BUS_MEM_TO_PHYS(bus_dma_tag_t dmat, bus_addr_t ba)
{
        unsigned i;

        if (dmat->_nranges == 0)
                return (paddr_t)ba;

        for (i = 0; i < dmat->_nranges; i++) {
                const struct arm32_dma_range *dr = &dmat->_ranges[i];

                if (dr->dr_busbase <= ba && ba - dr->dr_busbase <= dr->dr_len)
                        return ba - dr->dr_busbase + dr->dr_sysbase;
        }
        panic("bus address has no physical address in dma tag %p: %"PRIxPADDR,
            dmat, ba);
}
#elif defined(__sparc__) || defined(__sparc64__)
# define PHYS_TO_BUS_MEM(dmat, paddr)	((bus_addr_t)(paddr))
# define BUS_MEM_TO_PHYS(dmat, baddr)	((paddr_t)(baddr))
#elif defined(__powerpc__)
#else
# error DRM GEM/TTM need new MI bus_dma APIs! Halp!
#endif

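/*
 * bus_dmamem_pgfl() picks the uvm page freelist from which to allocate
 * pages that satisfy the tag's address limits (falling back to the
 * default freelist where no finer choice is available), and
 * bus_dmatag_bounces_paddr() reports whether DMA to the physical address
 * pa would have to go through a bounce buffer under the tag dmat.
 */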
static inline int
bus_dmamem_pgfl(bus_dma_tag_t tag)
{
#if defined(__i386__) || defined(__x86_64__)
        return x86_select_freelist(tag->_bounce_alloc_hi - 1);
#else
        return VM_FREELIST_DEFAULT;
#endif
}

static inline bool
bus_dmatag_bounces_paddr(bus_dma_tag_t dmat, paddr_t pa)
{
#if defined(__i386__) || defined(__x86_64__)
        return pa < dmat->_bounce_alloc_lo || dmat->_bounce_alloc_hi <= pa;
#elif defined(__arm__) || defined(__aarch64__)
        unsigned i;

        for (i = 0; i < dmat->_nranges; i++) {
                const struct arm32_dma_range *dr = &dmat->_ranges[i];
                if (dr->dr_sysbase <= pa && pa - dr->dr_sysbase <= dr->dr_len)
                        return false;
        }
        return true;
#elif defined(__powerpc__)
        return dmat->_bounce_thresh && pa >= dmat->_bounce_thresh;
#elif defined(__sparc__) || defined(__sparc64__)
        return false;           /* no bounce buffers ever */
#endif
}

#define MAX_STACK_SEGS	32	/* XXXMRG: 512 bytes on 16 byte seg platforms */

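/*
 * Usage sketch for bus_dmamap_load_pglist() below, assuming the caller
 * has already created `tag' and `map' (e.g. with bus_dmamap_create())
 * and wants `size' bytes of DMA-safe pages; the allocation bounds here
 * are illustrative only:
 *
 *      struct pglist pglist;
 *      int error;
 *
 *      TAILQ_INIT(&pglist);
 *      error = uvm_pglistalloc(size, 0, ~(paddr_t)0, PAGE_SIZE, 0,
 *          &pglist, atop(size), 1);
 *      if (error)
 *              return error;
 *      error = bus_dmamap_load_pglist(tag, map, &pglist, size,
 *          BUS_DMA_WAITOK);
 *      if (error)
 *              uvm_pglistfree(&pglist);
 *
 * On success the pages must eventually be released again with
 * bus_dmamap_unload() and uvm_pglistfree().
 */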
static inline int
bus_dmamap_load_pglist(bus_dma_tag_t tag, bus_dmamap_t map,
    struct pglist *pglist, bus_size_t size, int flags)
{
        km_flag_t kmflags;
        bus_dma_segment_t *segs;
        bus_dma_segment_t stacksegs[MAX_STACK_SEGS];
        int nsegs, seg;
        struct vm_page *page;
        int error;

        /* Count the pages, refusing lists too long to describe.  */
        nsegs = 0;
        TAILQ_FOREACH(page, pglist, pageq.queue) {
                if (nsegs == MIN(INT_MAX, (SIZE_MAX / sizeof(segs[0]))))
                        return ENOMEM;
                nsegs++;
        }

        /* Use the on-stack segment array when it is large enough.  */
        KASSERT(nsegs <= (SIZE_MAX / sizeof(segs[0])));
        if (nsegs > MAX_STACK_SEGS) {
                switch (flags & (BUS_DMA_WAITOK|BUS_DMA_NOWAIT)) {
                case BUS_DMA_WAITOK:
                        kmflags = KM_SLEEP;
                        break;
                case BUS_DMA_NOWAIT:
                        kmflags = KM_NOSLEEP;
                        break;
                default:
                        panic("invalid flags: %d", flags);
                }
                segs = kmem_alloc((nsegs * sizeof(segs[0])), kmflags);
                if (segs == NULL)
                        return ENOMEM;
        } else {
                segs = stacksegs;
        }

        /* Fill one single-page segment per page in the list.  */
        seg = 0;
        TAILQ_FOREACH(page, pglist, pageq.queue) {
                paddr_t paddr = VM_PAGE_TO_PHYS(page);
                bus_addr_t baddr = PHYS_TO_BUS_MEM(tag, paddr);

                segs[seg].ds_addr = baddr;
                segs[seg].ds_len = PAGE_SIZE;
                seg++;
        }

        error = bus_dmamap_load_raw(tag, map, segs, nsegs, size, flags);
        if (error)
                goto fail0;

        /* Success! */
        error = 0;
        goto out;

fail1: __unused
        bus_dmamap_unload(tag, map);
fail0:  KASSERT(error);
out:    if (segs != stacksegs) {
                KASSERT(nsegs > MAX_STACK_SEGS);
                kmem_free(segs, (nsegs * sizeof(segs[0])));
        }
        return error;
}

static inline int
bus_dmamem_export_pages(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
    int nsegs, paddr_t *pgs, unsigned npgs)
{
        int seg;
        unsigned i;

        i = 0;
        for (seg = 0; seg < nsegs; seg++) {
                bus_addr_t baddr = segs[seg].ds_addr;
                bus_size_t len = segs[seg].ds_len;

                /* Record the physical address of each page in the segment. */
                while (len >= PAGE_SIZE) {
                        paddr_t paddr = BUS_MEM_TO_PHYS(dmat, baddr);

                        KASSERT(i < npgs);
                        pgs[i++] = paddr;

                        baddr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                }
                KASSERT(len == 0);
        }
        KASSERT(i == npgs);

        return 0;
}

static inline int
bus_dmamem_import_pages(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, const paddr_t *pgs, unsigned npgs)
{
        int seg;
        unsigned i;

        seg = 0;
        for (i = 0; i < npgs; i++) {
                paddr_t paddr = pgs[i];
                bus_addr_t baddr = PHYS_TO_BUS_MEM(dmat, paddr);

                /* Extend the previous segment if this page follows it.  */
                if (seg > 0 &&
                    segs[seg - 1].ds_addr + segs[seg - 1].ds_len == baddr) {
                        segs[seg - 1].ds_len += PAGE_SIZE;
                } else {
                        KASSERT(seg < nsegs);
                        segs[seg].ds_addr = baddr;
                        segs[seg].ds_len = PAGE_SIZE;
                        seg++;
                        KASSERT(seg <= nsegs);
                }
        }
        KASSERT(seg <= nsegs);
        *rsegs = seg;

        return 0;
}

#endif	/* _DRM_BUS_DMA_HACKS_H_ */