/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_SCATTERLIST_H_
#define	_LINUX_SCATTERLIST_H_

#include <linux/bug.h>

/*
 * SG table design.
 *
 * If an entry's SG_CHAIN flag is set (flags bit 1 in this port), its sg
 * field contains a pointer to the next scatterlist chunk; otherwise the
 * next entry is at sg + 1.
 *
 * If an entry's SG_END flag is set (flags bit 0 in this port), it is the
 * last element in the list.
 *
 * See sg_next().
 */
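
/*
 * Illustrative layout (editorial sketch, not from the original source):
 * a list of 5 entries built in chunks of max_ents = 3 occupies two
 * chunks.  "C" marks the chain entry (SG_CHAIN set, sl_un.sg points at
 * the next chunk) and "E" marks the terminator (SG_END set):
 *
 *	chunk 0: [ e0 ][ e1 ][ C ] --> chunk 1: [ e2 ][ e3 ][ e4:E ]
 *
 * Note that the chain entry consumes the last slot of its chunk, so
 * chunk 0 carries only two payload entries.
 */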

struct scatterlist {
	union {
		struct vm_page		*page;
		struct scatterlist	*sg;
	} sl_un;
	dma_addr_t	address;
	unsigned long	offset;
	uint32_t	length;
	uint32_t	flags;
};

struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};

struct sg_page_iter {
	struct scatterlist	*sg;
	unsigned int		sg_pgoffset;	/* page index */
	unsigned int		maxents;
};

/*
 * Maximum number of entries that will be allocated in one piece; if a
 * larger list is required, chaining will be used.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))

#define	sg_dma_address(sg)	(sg)->address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

#define	SG_END		0x01
#define	SG_CHAIN	0x02

static inline void
sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
    unsigned int offset)
{
	sg_page(sg) = page;
	sg_dma_len(sg) = len;
	sg->offset = offset;
	if (offset > PAGE_SIZE)
		panic("sg_set_page: Invalid offset %u\n", offset);
}

#if 0
static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	/*
	 * Note: BSD's PAGE_MASK is (PAGE_SIZE - 1), so the intra-page
	 * offset is (buf & PAGE_MASK), not (buf & ~PAGE_MASK) as on Linux.
	 */
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & PAGE_MASK);
}
#endif

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg[nents - 1].flags = SG_END;
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->flags & SG_END)
		return (NULL);
	sg++;
	if (sg->flags & SG_CHAIN)
		sg = sg_scatternext(sg);
	return (sg);
}

#if 0
static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return sg_page(sg)->phys_addr + sg->offset;
}
#endif

/**
 * sg_chain - Chain two sglists together
 * @prv:        First scatterlist
 * @prv_nents:  Number of entries in prv
 * @sgl:        Second scatterlist
 *
 * Description:
 *   Links @prv and @sgl together to form a longer scatterlist.
 *
 **/
static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
					struct scatterlist *sgl)
{
	struct scatterlist *sg = &prv[prv_nents - 1];

	/*
	 * offset and length are unused for a chain entry.  Clear them.
	 */
	sg->offset = 0;
	sg->length = 0;

	/*
	 * Indicate a link pointer, and set the link to the second list.
	 */
	sg->flags = SG_CHAIN;
	sg->sl_un.sg = sgl;
}
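
#if 0
/*
 * Usage sketch (editorial addition, disabled): manually chain two
 * fixed-size chunks into one logical list of four entries.  The last
 * slot of the first chunk is consumed by the chain entry, so it carries
 * only two payload entries.
 */
static inline void
sg_chain_example(void)
{
	struct scatterlist first[3];	/* two payload entries + chain slot */
	struct scatterlist second[2];

	sg_init_table(first, 3);
	sg_init_table(second, 2);
	sg_chain(first, 3, second);	/* first[2] now links to second */
	/* sg_next(&first[1]) returns &second[0]. */
}
#endif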

/**
 * sg_mark_end - Mark the end of the scatterlist
 * @sg:          SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table. A call to sg_next() on this entry will return NULL.
 *
 **/
static inline void
sg_mark_end(struct scatterlist *sg)
{
	sg->flags = SG_END;
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:      The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:      The mapped sg table header
 *
 **/
static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
}

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @gfp_mask:   GFP allocation mask
 *
 * Description:
 *   This function allocates a @table @nents entries long. The allocator
 *   is defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end.
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
		gfp_mask);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);

	return ret;
}
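
#if 0
/*
 * Usage sketch (editorial addition, disabled): allocate a table covering
 * 'npages' vm_page pointers, attach one page per entry, then release it.
 * The 'pages' array and the GFP_KERNEL flag are illustrative assumptions.
 */
static inline int
sg_table_example(struct vm_page **pages, unsigned int npages)
{
	struct sg_table st;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&st, npages, GFP_KERNEL);
	if (ret)
		return (ret);
	for_each_sg(st.sgl, sg, npages, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
	/* ... hand st.sgl to the DMA layer here ... */
	sg_free_table(&st);
	return (0);
}
#endif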

/*
 * Iterate pages in sg list.
 */
static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_MASK) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}

/*
 * NOTE: pgoffset is really a page index, not a byte offset.
 */
static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
	      unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		/*
		 * Nominal case.  Note: subtract 1 from the starting page
		 * index to compensate for the increment done by the
		 * initial _sg_iter_next() call.
		 */
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		/*
		 * Degenerate case
		 */
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}

static inline dma_addr_t
sg_page_iter_dma_address(struct sg_page_iter *spi)
{
	/* Widen before shifting so large page indices cannot overflow. */
	return spi->sg->address + ((dma_addr_t)spi->sg_pgoffset << PAGE_SHIFT);
}

#define for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))

#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
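
#if 0
/*
 * Usage sketch (editorial addition, disabled): walk every page of a
 * populated table and fetch its bus address, e.g. to program hardware
 * page tables.  'st' is assumed to already carry valid dma addresses.
 */
static inline void
sg_iter_example(struct sg_table *st)
{
	struct sg_page_iter iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&iter);
		/* ... use addr ... */
	}
}
#endif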

#endif	/* _LINUX_SCATTERLIST_H_ */