xref: /dflybsd-src/sys/dev/drm/include/linux/scatterlist.h (revision 31c068aaf635ad9fa72dbc4c65b32d890ff7544d)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #ifndef	_LINUX_SCATTERLIST_H_
31 #define	_LINUX_SCATTERLIST_H_
32 
/*
 * SG table design.
 *
 * If an entry's flags has SG_CHAIN set, then its sg field contains a
 * pointer to the next sg table list; otherwise the next entry is at
 * sg + 1.
 *
 * If an entry's flags has SG_END set, then that entry is the last
 * element in the list.
 *
 * See sg_next().
 *
 */
46 
/*
 * One scatter/gather segment.  A normal entry describes a physically
 * contiguous region by its backing page plus offset/length; a chain
 * entry (SG_CHAIN) instead carries a pointer to the next array.
 */
struct scatterlist {
	union {
		struct vm_page		*page;	/* backing page (normal entry) */
		struct scatterlist	*sg;	/* next array (chain entry) */
	} sl_un;
	dma_addr_t	address;	/* DMA address of the segment */
	unsigned long	offset;		/* byte offset into the page */
	uint32_t	length;		/* segment length in bytes */
	uint32_t	flags;		/* SG_END and/or SG_CHAIN */
};
57 
/*
 * Header describing a (possibly chained) scatterlist allocation.
 */
struct sg_table {
	struct scatterlist *sgl;        /* the list */
	unsigned int nents;             /* number of mapped entries */
	unsigned int orig_nents;        /* original size of list */
};
63 
/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))

/* Field accessors for a scatterlist entry. */
#define	sg_dma_address(sg)	(sg)->address
#define	sg_dma_len(sg)		(sg)->length
#define	sg_page(sg)		(sg)->sl_un.page
#define	sg_scatternext(sg)	(sg)->sl_un.sg

/* Entry flags. */
#define	SG_END		0x01	/* last entry of the whole list */
#define	SG_CHAIN	0x02	/* entry links to another scatterlist array */
77 
78 static inline void
79 sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
80     unsigned int offset)
81 {
82 	sg_page(sg) = page;
83 	sg_dma_len(sg) = len;
84 	sg->offset = offset;
85 	if (offset > PAGE_SIZE)
86 		panic("sg_set_page: Invalid offset %d\n", offset);
87 }
88 
#if 0
/*
 * Initialize @sg to describe @buflen bytes at kernel virtual address @buf.
 *
 * NOTE(review): on BSD, PAGE_MASK == PAGE_SIZE - 1 (inverse of the Linux
 * convention), so `buf & ~PAGE_MASK` yields the page-frame bits, not the
 * intra-page offset — verify this mask before enabling this code.
 */
static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & ~PAGE_MASK);
}

/*
 * Zero @nents entries starting at @sg and mark the final one as the
 * list terminator.
 */
static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg[nents - 1].flags = SG_END;
}
#endif
104 
105 static inline struct scatterlist *
106 sg_next(struct scatterlist *sg)
107 {
108 	if (sg->flags & SG_END)
109 		return (NULL);
110 	sg++;
111 	if (sg->flags & SG_CHAIN)
112 		sg = sg_scatternext(sg);
113 	return (sg);
114 }
115 
#if 0
/*
 * Return the physical address of the data described by @sg.
 */
static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return sg_page(sg)->phys_addr + sg->offset;
}
#endif
123 
124 /**
125  * sg_chain - Chain two sglists together
126  * @prv:        First scatterlist
127  * @prv_nents:  Number of entries in prv
128  * @sgl:        Second scatterlist
129  *
130  * Description:
131  *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
132  *
133  **/
134 static inline void
135 sg_chain(struct scatterlist *prv, unsigned int prv_nents,
136 					struct scatterlist *sgl)
137 {
138 /*
139  * offset and length are unused for chain entry.  Clear them.
140  */
141 	struct scatterlist *sg = &prv[prv_nents - 1];
142 
143 	sg->offset = 0;
144 	sg->length = 0;
145 
146 	/*
147 	 * Indicate a link pointer, and set the link to the second list.
148 	 */
149 	sg->flags = SG_CHAIN;
150 	sg->sl_un.sg = sgl;
151 }
152 
153 /**
154  * sg_mark_end - Mark the end of the scatterlist
155  * @sg:          SG entryScatterlist
156  *
157  * Description:
158  *   Marks the passed in sg entry as the termination point for the sg
159  *   table. A call to sg_next() on this entry will return NULL.
160  *
161  **/
162 static inline void sg_mark_end(struct scatterlist *sg)
163 {
164         sg->flags = SG_END;
165 }
166 
#if 0
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:      The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			/* Grab the chain link before freeing this chunk. */
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;	/* payload entries released */
		kfree(sgl);
		sgl = next;	/* advance to the chained-to array, if any */
	}

	table->sgl = NULL;
}
214 
/**
 * sg_free_table - Free a previously allocated sg table
 * @table:      The mapped sg table header
 *
 *  Description:
 *    Convenience wrapper: frees a table allocated with sg_alloc_table(),
 *    i.e. one built with the default SG_MAX_SINGLE_ALLOC chunk size.
 *
 **/
static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
}
225 
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @gfp_mask:   GFP allocation mask
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
		unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;	/* entries still to be allocated */

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		/*
		 * A full-size chunk sacrifices its last entry as the
		 * chain pointer, so it holds max_ents - 1 payload entries.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
		if (unlikely(!sg)) {
		/*
		 * Adjust entry count to reflect that the last
		 * entry of the previous table won't be used for
		 * linkage.  Without this, sg_kfree() may get
		 * confused.
		 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		* If no more entries after this one, mark the end
		*/
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
305 
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/

static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
		gfp_mask);
	/* On failure, release any chunks that were already allocated. */
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);

	return ret;
}
#endif
331 
/*
 * Iterate @sg over @sgmax entries of @sglist using counter @_itr.
 * NOTE(review): sg_next() returns NULL once an SG_END entry is passed,
 * so the caller must ensure @sgmax does not exceed the list length.
 */
#define	for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
334 
335 #endif	/* _LINUX_SCATTERLIST_H_ */
336