/*	$NetBSD: linux_sgt.c,v 1.4 2021/12/24 15:08:31 riastradh Exp $	*/

/*-
 * Copyright (c) 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_sgt.c,v 1.4 2021/12/24 15:08:31 riastradh Exp $");

#include <sys/bus.h>
#include <sys/errno.h>

#include <drm/bus_dma_hacks.h>

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

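/*
 * sg_alloc_table(sgt, npgs, gfp)
 *
 *	Allocate a page array of npgs entries for the scatter/gather
 *	table sgt, with no DMA map.  Return 0 on success, -ENOMEM on
 *	failure.  On success, the caller must eventually release the
 *	table with sg_free_table.
 */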
int
sg_alloc_table(struct sg_table *sgt, unsigned npgs, gfp_t gfp)
{

	sgt->sgl->sg_pgs = kcalloc(npgs, sizeof(sgt->sgl->sg_pgs[0]), gfp);
	if (sgt->sgl->sg_pgs == NULL)
		return -ENOMEM;
	sgt->sgl->sg_npgs = sgt->nents = npgs;
	sgt->sgl->sg_dmamap = NULL;

	return 0;
}

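/*
 * __sg_alloc_table_from_pages(sgt, pgs, npgs, offset, size, maxseg, gfp)
 *
 *	Allocate a scatter/gather table referring to the npgs pages in
 *	pgs.  This stub handles only the page-aligned case: offset must
 *	be zero and size must be exactly npgs pages; maxseg is ignored.
 *	Return 0 on success, negative errno on failure.
 */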
int
__sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pgs,
    unsigned npgs, bus_size_t offset, bus_size_t size, unsigned maxseg,
    gfp_t gfp)
{
	unsigned i;
	int ret;

	KASSERT(offset == 0);
	KASSERT(size == (bus_size_t)npgs << PAGE_SHIFT);

	ret = sg_alloc_table(sgt, npgs, gfp);
	if (ret)
		return ret;

	for (i = 0; i < npgs; i++)
		sgt->sgl->sg_pgs[i] = pgs[i];

	return 0;
}

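/*
 * sg_alloc_table_from_pages(sgt, pgs, npgs, offset, size, gfp)
 *
 *	Like __sg_alloc_table_from_pages, but with no maximum segment
 *	size.  Return 0 on success, negative errno on failure.
 */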
int
sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pgs,
    unsigned npgs, bus_size_t offset, bus_size_t size, gfp_t gfp)
{

	return __sg_alloc_table_from_pages(sgt, pgs, npgs, offset, size,
	    -1, gfp);
}

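/*
 * sg_alloc_table_from_bus_dmamem(sgt, dmat, seg, nseg, gfp)
 *
 *	Allocate a scatter/gather table describing the pages of the
 *	bus_dma(9) memory segments seg[0], ..., seg[nseg-1].  Each
 *	segment's length must be a multiple of PAGE_SIZE.  Return 0 on
 *	success, negative errno on failure; on success the caller must
 *	eventually release the table with sg_free_table.
 */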
int
sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_tag_t dmat,
    const bus_dma_segment_t *seg, int nseg, gfp_t gfp)
{
	int i, npgs = 0;
	int ret;

	KASSERT(nseg >= 1);

	/*
	 * Count the number of pages.  Some segments may span multiple
	 * contiguous pages.
	 */
	for (i = 0; i < nseg; i++) {
		bus_size_t len = seg[i].ds_len;
		for (; len >= PAGE_SIZE; len -= PAGE_SIZE, npgs++) {
			if (npgs == INT_MAX)
				return -ENOMEM;
		}
		KASSERTMSG(len == 0, "misaligned segment length: %ju\n",
		    (uintmax_t)seg[i].ds_len);
	}

	ret = sg_alloc_table(sgt, npgs, gfp);
	if (ret)
		return ret;

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_export_pages(dmat, seg, nseg, sgt->sgl->sg_pgs,
	    sgt->sgl->sg_npgs);
	if (ret)
		goto out;

	/* Success! */
	ret = 0;

out:	if (ret)
		sg_free_table(sgt);
	return ret;
}

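/*
 * sg_free_table(sgt)
 *
 *	Free the page array of sgt, and destroy its DMA map if one is
 *	still attached.
 */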
void
sg_free_table(struct sg_table *sgt)
{

	if (sgt->sgl->sg_dmamap) {
		KASSERT(sgt->sgl->sg_dmat);
		bus_dmamap_destroy(sgt->sgl->sg_dmat, sgt->sgl->sg_dmamap);
	}
	kfree(sgt->sgl->sg_pgs);
	sgt->sgl->sg_pgs = NULL;
	sgt->sgl->sg_npgs = 0;
}

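/*
 * dma_map_sg(dmat, sg, nents, dir)
 *
 *	Like dma_map_sg_attrs, but with no attributes.
 */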
int
dma_map_sg(bus_dma_tag_t dmat, struct scatterlist *sg, int nents, int dir)
{

	return dma_map_sg_attrs(dmat, sg, nents, dir, 0);
}

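/*
 * dma_map_sg_attrs(dmat, sg, nents, dir, attrs)
 *
 *	Create a bus_dma(9) map for the pages of sg and load it for
 *	DMA in the direction dir, with at most nents segments.  The
 *	attrs argument is ignored.  Return the number of DMA segments
 *	mapped on success, or 0 on failure (as in Linux, 0 indicates
 *	error here).  On success, release the mapping with
 *	dma_unmap_sg.
 *
 *	Illustrative call sequence (a sketch only; the local names are
 *	hypothetical, not taken from any in-tree caller):
 *
 *		nsegs = dma_map_sg_attrs(dmat, sgt->sgl, sgt->nents,
 *		    DMA_TO_DEVICE, 0);
 *		if (nsegs == 0)
 *			return -ENOMEM;
 *		...
 *		dma_unmap_sg_attrs(dmat, sgt->sgl, sgt->nents,
 *		    DMA_TO_DEVICE, 0);
 */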
int
dma_map_sg_attrs(bus_dma_tag_t dmat, struct scatterlist *sg, int nents,
    int dir, int attrs)
{
	int flags = 0;
	bool loaded = false;
	int ret, error = 0;

	KASSERT(sg->sg_dmamap == NULL);
	KASSERT(sg->sg_npgs);
	KASSERT(nents >= 1);

	switch (dir) {
	case DMA_TO_DEVICE:
		flags |= BUS_DMA_WRITE;
		break;
	case DMA_FROM_DEVICE:
		flags |= BUS_DMA_READ;
		break;
	case DMA_BIDIRECTIONAL:
		flags |= BUS_DMA_READ|BUS_DMA_WRITE;
		break;
	case DMA_NONE:
	default:
		panic("invalid DMA direction %d", dir);
	}

	error = bus_dmamap_create(dmat, (bus_size_t)sg->sg_npgs << PAGE_SHIFT,
	    nents, PAGE_SIZE, 0, BUS_DMA_WAITOK, &sg->sg_dmamap);
	if (error)
		goto out;
	KASSERT(sg->sg_dmamap);

	error = bus_dmamap_load_pages(dmat, sg->sg_dmamap, sg->sg_pgs,
	    (bus_size_t)sg->sg_npgs << PAGE_SHIFT, BUS_DMA_WAITOK|flags);
	if (error)
		goto out;
	loaded = true;

	/* Success! */
	KASSERT(sg->sg_dmamap->dm_nsegs > 0);
	KASSERT(sg->sg_dmamap->dm_nsegs <= nents);
	sg->sg_dmat = dmat;
	ret = sg->sg_dmamap->dm_nsegs;
	error = 0;

out:	if (error) {
		if (loaded)
			bus_dmamap_unload(dmat, sg->sg_dmamap);
		loaded = false;
		if (sg->sg_dmamap)
			bus_dmamap_destroy(dmat, sg->sg_dmamap);
		sg->sg_dmamap = NULL;
		ret = 0;
	}
	return ret;
}

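/*
 * dma_unmap_sg(dmat, sg, nents, dir)
 *
 *	Like dma_unmap_sg_attrs, but with no attributes.
 */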
void
dma_unmap_sg(bus_dma_tag_t dmat, struct scatterlist *sg, int nents, int dir)
{

	dma_unmap_sg_attrs(dmat, sg, nents, dir, 0);
}

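/*
 * dma_unmap_sg_attrs(dmat, sg, nents, dir, attrs)
 *
 *	Unload and destroy the DMA map created by dma_map_sg_attrs.
 *	The nents, dir, and attrs arguments are ignored.
 */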
void
dma_unmap_sg_attrs(bus_dma_tag_t dmat, struct scatterlist *sg, int nents,
    int dir, int attrs)
{

	KASSERT(sg->sg_dmat == dmat);

	bus_dmamap_unload(dmat, sg->sg_dmamap);
	bus_dmamap_destroy(dmat, sg->sg_dmamap);
	sg->sg_dmamap = NULL;
}

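/*
 * sg_dma_address(sg)
 *
 *	Return the DMA address of sg's DMA map.  The scatterlist must
 *	have been mapped into exactly one segment.
 */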
bus_addr_t
sg_dma_address(const struct scatterlist *sg)
{

	KASSERT(sg->sg_dmamap->dm_nsegs == 1);
	return sg->sg_dmamap->dm_segs[0].ds_addr;
}

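/*
 * sg_dma_len(sg)
 *
 *	Return the length of sg's DMA map.  The scatterlist must have
 *	been mapped into exactly one segment.
 */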
bus_size_t
sg_dma_len(const struct scatterlist *sg)
{

	KASSERT(sg->sg_dmamap->dm_nsegs == 1);
	return sg->sg_dmamap->dm_segs[0].ds_len;
}