1 /* $NetBSD: bus_dma_hacks.h,v 1.25 2022/07/19 23:19:44 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifndef _DRM_BUS_DMA_HACKS_H_
33 #define _DRM_BUS_DMA_HACKS_H_
34
35 #include <sys/cdefs.h>
36 #include <sys/bus.h>
37 #include <sys/kmem.h>
38 #include <sys/queue.h>
39
40 #include <uvm/uvm.h>
41 #include <uvm/uvm_extern.h>
42
43 #include <linux/mm_types.h> /* XXX struct page */
44
45 #if defined(__i386__) || defined(__x86_64__)
46 # include <x86/bus_private.h>
47 # include <x86/machdep.h>
48 # define PHYS_TO_BUS_MEM(dmat, paddr) ((bus_addr_t)(paddr))
49 # define BUS_MEM_TO_PHYS(dmat, baddr) ((paddr_t)(baddr))
50 #elif defined(__arm__) || defined(__aarch64__)
51 static inline bus_addr_t
PHYS_TO_BUS_MEM(bus_dma_tag_t dmat,paddr_t pa)52 PHYS_TO_BUS_MEM(bus_dma_tag_t dmat, paddr_t pa)
53 {
54 unsigned i;
55
56 if (dmat->_nranges == 0)
57 return (bus_addr_t)pa;
58
59 for (i = 0; i < dmat->_nranges; i++) {
60 const struct arm32_dma_range *dr = &dmat->_ranges[i];
61
62 if (dr->dr_sysbase <= pa && pa - dr->dr_sysbase <= dr->dr_len)
63 return pa - dr->dr_sysbase + dr->dr_busbase;
64 }
65 panic("paddr has no bus address in dma tag %p: %"PRIxPADDR, dmat, pa);
66 }
67 static inline paddr_t
BUS_MEM_TO_PHYS(bus_dma_tag_t dmat,bus_addr_t ba)68 BUS_MEM_TO_PHYS(bus_dma_tag_t dmat, bus_addr_t ba)
69 {
70 unsigned i;
71
72 if (dmat->_nranges == 0)
73 return (paddr_t)ba;
74
75 for (i = 0; i < dmat->_nranges; i++) {
76 const struct arm32_dma_range *dr = &dmat->_ranges[i];
77
78 if (dr->dr_busbase <= ba && ba - dr->dr_busbase <= dr->dr_len)
79 return ba - dr->dr_busbase + dr->dr_sysbase;
80 }
81 panic("bus addr has no bus address in dma tag %p: %"PRIxPADDR, dmat,
82 ba);
83 }
84 #elif defined(__sparc__) || defined(__sparc64__)
85 # define PHYS_TO_BUS_MEM(dmat, paddr) ((bus_addr_t)(paddr))
86 # define BUS_MEM_TO_PHYS(dmat, baddr) ((paddr_t)(baddr))
87 #elif defined(__powerpc__)
88 #elif defined(__alpha__)
89 # define PHYS_TO_BUS_MEM(dmat, paddr) \
90 ((bus_addr_t)(paddr) | (dmat)->_wbase)
91 # define BUS_MEM_TO_PHYS(dmat, baddr) \
92 ((paddr_t)((baddr) & ~(bus_addr_t)(dmat)->_wbase))
93 #else
94 # error DRM GEM/TTM need new MI bus_dma APIs! Halp!
95 #endif
96
/*
 * bus_dmamem_pgfl(tag)
 *
 *	Return the uvm page freelist from which to allocate pages that
 *	satisfy tag's DMA addressing constraints.
 */
static inline int
bus_dmamem_pgfl(bus_dma_tag_t tag)
{
#if defined(__i386__) || defined(__x86_64__)
	/*
	 * Presumably selects the freelist whose pages all lie at or
	 * below the tag's highest bounceable address -- confirm
	 * against x86_select_freelist.
	 */
	return x86_select_freelist(tag->_bounce_alloc_hi - 1);
#else
	/* Other ports make no per-freelist address distinction here.  */
	return VM_FREELIST_DEFAULT;
#endif
}
106
/*
 * bus_dmatag_bounces_paddr(dmat, pa)
 *
 *	True if devices under dmat cannot address the physical address
 *	pa directly, so DMA involving pa would go through a bounce
 *	buffer.
 */
static inline bool
bus_dmatag_bounces_paddr(bus_dma_tag_t dmat, paddr_t pa)
{
#if defined(__i386__) || defined(__x86_64__)
	/* Bounce anything outside the tag's [lo, hi) address window.  */
	return pa < dmat->_bounce_alloc_lo || dmat->_bounce_alloc_hi <= pa;
#elif defined(__arm__) || defined(__aarch64__)
	unsigned i;

	/*
	 * Bounce unless some DMA range covers pa.
	 *
	 * NOTE(review): with _nranges == 0 this returns true (always
	 * bounce), whereas PHYS_TO_BUS_MEM above treats an empty range
	 * table as an identity mapping -- confirm which is intended.
	 * Also, `<=' against dr_len admits an offset one past the end
	 * of the range; see the note in BUS_MEM_TO_PHYS.
	 */
	for (i = 0; i < dmat->_nranges; i++) {
		const struct arm32_dma_range *dr = &dmat->_ranges[i];
		if (dr->dr_sysbase <= pa && pa - dr->dr_sysbase <= dr->dr_len)
			return false;
	}
	return true;
#elif defined(__powerpc__)
	/* A zero threshold means never bounce.  */
	return dmat->_bounce_thresh && pa >= dmat->_bounce_thresh;
#elif defined(__sparc__) || defined(__sparc64__)
	return false;		/* no bounce buffers ever */
#elif defined(__alpha__)
	/* Bounce only when a window size is set and pa lies beyond it.  */
	return (dmat->_wsize == 0 ? false : pa >= dmat->_wsize);
#endif
}
129
130 #define MAX_STACK_SEGS 32 /* XXXMRG: 512 bytes on 16 byte seg platforms */
131
132 /*
133 * XXX This should really take an array of struct vm_page pointers, but
134 * Linux drm code stores arrays of struct page pointers, and these two
135 * types (struct page ** and struct vm_page **) are not compatible so
136 * naive conversion would violate strict aliasing rules.
137 */
/*
 * bus_dmamap_load_pages(tag, map, pgs, size, flags)
 *
 *	Load map with the physical pages backing the size/PAGE_SIZE
 *	struct page pointers in pgs, one PAGE_SIZE bus_dma segment per
 *	page.  size must be a multiple of PAGE_SIZE, and flags must
 *	contain exactly one of BUS_DMA_WAITOK or BUS_DMA_NOWAIT when a
 *	temporary segment array has to be allocated (anything else
 *	panics there).
 *
 *	Returns 0 on success, ENOMEM if the page count would overflow
 *	int or the temporary segment array cannot be allocated, or
 *	whatever error bus_dmamap_load_raw returns.
 */
static inline int
bus_dmamap_load_pages(bus_dma_tag_t tag, bus_dmamap_t map,
    struct page **pgs, bus_size_t size, int flags)
{
	km_flag_t kmflags;
	bus_dma_segment_t *segs;
	bus_dma_segment_t stacksegs[MAX_STACK_SEGS];
	int nsegs, seg;
	struct vm_page *page;
	int error;

	/* Caller must pass a whole number of pages.  */
	KASSERT((size & (PAGE_SIZE - 1)) == 0);

	/* nsegs is int; refuse sizes whose page count would overflow it. */
	if ((size >> PAGE_SHIFT) > INT_MAX)
		return ENOMEM;
	nsegs = size >> PAGE_SHIFT;

	/*
	 * Use the on-stack segment array when it is big enough;
	 * otherwise allocate a temporary one, sleeping only if the
	 * caller said BUS_DMA_WAITOK.
	 */
	KASSERT(nsegs <= (SIZE_MAX / sizeof(segs[0])));
	if (nsegs > MAX_STACK_SEGS) {
		switch (flags & (BUS_DMA_WAITOK|BUS_DMA_NOWAIT)) {
		case BUS_DMA_WAITOK:
			kmflags = KM_SLEEP;
			break;
		case BUS_DMA_NOWAIT:
			kmflags = KM_NOSLEEP;
			break;
		default:
			panic("invalid flags: %d", flags);
		}
		segs = kmem_alloc((nsegs * sizeof(segs[0])), kmflags);
		if (segs == NULL)
			return ENOMEM;
	} else {
		segs = stacksegs;
	}

	/* One segment per page: its bus address, PAGE_SIZE long.  */
	for (seg = 0; seg < nsegs; seg++) {
		page = &pgs[seg]->p_vmp;
		paddr_t paddr = VM_PAGE_TO_PHYS(page);
		bus_addr_t baddr = PHYS_TO_BUS_MEM(tag, paddr);

		segs[seg].ds_addr = baddr;
		segs[seg].ds_len = PAGE_SIZE;
	}

	error = bus_dmamap_load_raw(tag, map, segs, nsegs, size, flags);
	if (error)
		goto fail0;

	/* Success! */
	error = 0;
	goto out;

	/*
	 * fail1 is currently unreachable (nothing fails after the
	 * load); it is kept, marked __unused, so a future error path
	 * after the load has an unload step to jump to.
	 */
fail1: __unused
	bus_dmamap_unload(tag, map);
fail0:	KASSERT(error);
out:	if (segs != stacksegs) {
		/* Free the temporary array iff we allocated one.  */
		KASSERT(nsegs > MAX_STACK_SEGS);
		kmem_free(segs, (nsegs * sizeof(segs[0])));
	}
	return error;
}
200
201 static inline int
bus_dmamem_export_pages(bus_dma_tag_t dmat,const bus_dma_segment_t * segs,int nsegs,struct page ** pgs,unsigned npgs)202 bus_dmamem_export_pages(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
203 int nsegs, struct page **pgs, unsigned npgs)
204 {
205 int seg;
206 unsigned pg;
207
208 pg = 0;
209 for (seg = 0; seg < nsegs; seg++) {
210 bus_addr_t baddr = segs[seg].ds_addr;
211 bus_size_t len = segs[seg].ds_len;
212
213 while (len >= PAGE_SIZE) {
214 paddr_t paddr = BUS_MEM_TO_PHYS(dmat, baddr);
215
216 KASSERT(pg < npgs);
217 pgs[pg++] = container_of(PHYS_TO_VM_PAGE(paddr),
218 struct page, p_vmp);
219
220 baddr += PAGE_SIZE;
221 len -= PAGE_SIZE;
222 }
223 KASSERT(len == 0);
224 }
225 KASSERT(pg == npgs);
226
227 return 0;
228 }
229
230 static inline int
bus_dmamem_import_pages(bus_dma_tag_t dmat,bus_dma_segment_t * segs,int nsegs,int * rsegs,struct page * const * pgs,unsigned npgs)231 bus_dmamem_import_pages(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
232 int nsegs, int *rsegs, struct page *const *pgs, unsigned npgs)
233 {
234 int seg;
235 unsigned i;
236
237 seg = 0;
238 for (i = 0; i < npgs; i++) {
239 paddr_t paddr = VM_PAGE_TO_PHYS(&pgs[i]->p_vmp);
240 bus_addr_t baddr = PHYS_TO_BUS_MEM(dmat, paddr);
241
242 if (seg > 0 && segs[seg - 1].ds_addr + PAGE_SIZE == baddr) {
243 segs[seg - 1].ds_len += PAGE_SIZE;
244 } else {
245 KASSERT(seg < nsegs);
246 segs[seg].ds_addr = baddr;
247 segs[seg].ds_len = PAGE_SIZE;
248 seg++;
249 KASSERT(seg <= nsegs);
250 }
251 }
252 KASSERT(seg <= nsegs);
253 *rsegs = seg;
254
255 return 0;
256 }
257
258 #endif /* _DRM_BUS_DMA_HACKS_H_ */
259