/* $NetBSD: bus_dmamem_common.c,v 1.5 2022/11/12 07:48:34 skrll Exp $ */

/*-
 * Copyright (c) 1997, 1998, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dmamem_common.c,v 1.5 2022/11/12 07:48:34 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/bus.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#include <dev/bus_dma/bus_dmamem_common.h>

/*
 * _bus_dmamem_alloc_range_common --
 *	Allocate physical memory from the specified physical address range.
 */
int
_bus_dmamem_alloc_range_common(bus_dma_tag_t t,
			       bus_size_t size,
			       bus_size_t alignment,
			       bus_size_t boundary,
			       bus_dma_segment_t *segs,
			       int nsegs,
			       int *rsegs,
			       int flags,
			       paddr_t low,
			       paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/* Allocate pages from the VM system. */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (__predict_false(error != 0))
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM system.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		KASSERT(curaddr >= low);
		KASSERT(curaddr < high);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
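
/*
 * Example (illustrative sketch, not part of this file): a machine-
 * dependent bus_dmamem_alloc() typically just wraps the range helper
 * above, supplying the physical bounds of DMA-able memory for that
 * platform.  The bounds shown here ("avail_start"/"avail_end") are
 * placeholders; each port substitutes its own limits or a specific
 * DMA window.  The matching free hook is usually
 * _bus_dmamem_free_common() used directly.
 *
 *	int
 *	_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
 *	    bus_size_t alignment, bus_size_t boundary,
 *	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
 *	{
 *		return (_bus_dmamem_alloc_range_common(t, size, alignment,
 *		    boundary, segs, nsegs, rsegs, flags,
 *		    avail_start, trunc_page(avail_end)));
 *	}
 */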

/*
 * _bus_dmamem_free_common --
 *	Free memory allocated with _bus_dmamem_alloc_range_common()
 *	back to the VM system.
 */
void
_bus_dmamem_free_common(bus_dma_tag_t t,
			bus_dma_segment_t *segs,
			int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/* Rebuild the page list from the physical segments... */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	/* ...and give the pages back to the VM system. */
	uvm_pglistfree(&mlist);
}

/*
 * _bus_dmamem_map_common --
 *	Map memory allocated with _bus_dmamem_alloc_range_common() into
 *	the kernel virtual address space.
 */
int
_bus_dmamem_map_common(bus_dma_tag_t t,
		       bus_dma_segment_t *segs,
		       int nsegs,
		       size_t size,
		       void **kvap,
		       int flags,
		       int pmapflags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	/* Reserve kernel virtual address space for the mapping. */
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (__predict_false(va == 0))
		return (ENOMEM);

	*kvap = (void *)va;

	/* Enter each physical page into the kernel pmap. */
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			KASSERT(size != 0);
			/* XXX pmap_kenter_pa()? */
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    pmapflags | PMAP_WIRED |
			    VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
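
/*
 * Example (illustrative sketch, not part of this file): a machine-
 * dependent bus_dmamem_map() usually just translates MI BUS_DMA_*
 * flags into pmap flags and defers to the helper above.  PMAP_NOCACHE
 * is used here as a stand-in for whatever uncached-mapping flag the
 * port's pmap provides; not every port defines it.  The inverse
 * operation is usually _bus_dmamem_unmap_common() used directly.
 *
 *	int
 *	_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
 *	    size_t size, void **kvap, int flags)
 *	{
 *		return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap,
 *		    flags,
 *		    (flags & BUS_DMA_COHERENT) != 0 ? PMAP_NOCACHE : 0));
 *	}
 */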

/*
 * _bus_dmamem_unmap_common --
 *	Remove a mapping created with _bus_dmamem_map_common().
 */
void
_bus_dmamem_unmap_common(bus_dma_tag_t t,
			 void *kva,
			 size_t size)
{

	KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);

	size = round_page(size);
	/* XXX pmap_kremove()?  See above... */
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * _bus_dmamem_mmap_common --
 *	Mmap support for memory allocated with _bus_dmamem_alloc_range_common().
 */
bus_addr_t
_bus_dmamem_mmap_common(bus_dma_tag_t t,
			bus_dma_segment_t *segs,
			int nsegs,
			off_t off,
			int prot,
			int flags)
{
	int i;

	/* Walk the segments until the offset falls within one of them. */
	for (i = 0; i < nsegs; i++) {
		KASSERT((off & PAGE_MASK) == 0);
		KASSERT((segs[i].ds_addr & PAGE_MASK) == 0);
		KASSERT((segs[i].ds_len & PAGE_MASK) == 0);
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		/* XXX BUS_DMA_COHERENT */

		return (segs[i].ds_addr + off);
	}

	/* Page not found. */
	return ((bus_addr_t)-1);
}
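
/*
 * Example (illustrative sketch, not part of this file): a machine-
 * dependent bus_dmamem_mmap() can defer to the helper above and then
 * apply any port-specific encoding of the returned physical address
 * (some pmaps expect the cookie produced by a pmap_phys_address()-style
 * macro rather than the raw address).
 *
 *	paddr_t
 *	_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
 *	    off_t off, int prot, int flags)
 *	{
 *		bus_addr_t rv;
 *
 *		rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
 *		if (rv == (bus_addr_t)-1)
 *			return ((paddr_t)-1);
 *		return ((paddr_t)rv);
 *	}
 */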