/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SCATTERLIST_H_
#define _LINUX_SCATTERLIST_H_

/*
 * SG table design.
 *
 * If the SG_CHAIN flag (bit 1) is set, the sg union member contains a
 * pointer to the first entry of the next scatterlist chunk; otherwise the
 * next entry is at sg + 1.  This can be determined using the sg_is_chain()
 * function.
 *
 * If the SG_END flag (bit 0) is set, this sg entry is the last element in
 * the list.  This can be determined using the sg_is_last() function.
 *
 * See sg_next().
 */

struct scatterlist {
	union {
		struct vm_page		*page;
		struct scatterlist	*sg;
	} sl_un;
	dma_addr_t	address;
	unsigned long	offset;
	uint32_t	length;
	uint32_t	flags;
};

struct sg_table {
	struct scatterlist *sgl;	/* the list */
	unsigned int nents;		/* number of mapped entries */
	unsigned int orig_nents;	/* original size of list */
};

/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

#define sg_dma_address(sg)	(sg)->address
#define sg_dma_len(sg)		(sg)->length
#define sg_page(sg)		(sg)->sl_un.page
#define sg_scatternext(sg)	(sg)->sl_un.sg

#define SG_END		0x01
#define SG_CHAIN	0x02
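
/*
 * The design comment above refers to sg_is_chain() and sg_is_last(), which
 * this header does not otherwise provide.  A minimal sketch of those
 * helpers, assuming only the SG_END/SG_CHAIN flag bits defined above
 * (illustrative, not part of the original port):
 */
static inline int
sg_is_chain(struct scatterlist *sg)
{
	/* True when this entry links to the next scatterlist chunk. */
	return ((sg->flags & SG_CHAIN) != 0);
}

static inline int
sg_is_last(struct scatterlist *sg)
{
	/* True when this entry terminates the list. */
	return ((sg->flags & SG_END) != 0);
}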

static inline void
sg_set_page(struct scatterlist *sg, struct vm_page *page, unsigned int len,
    unsigned int offset)
{
	sg_page(sg) = page;
	sg_dma_len(sg) = len;
	sg->offset = offset;
	if (offset > PAGE_SIZE)
		panic("sg_set_page: Invalid offset %u\n", offset);
}

#if 0
static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & ~PAGE_MASK);
}
#endif

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg[nents - 1].flags = SG_END;
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->flags & SG_END)
		return (NULL);
	sg++;
	if (sg->flags & SG_CHAIN)
		sg = sg_scatternext(sg);
	return (sg);
}

#if 0
static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return sg_page(sg)->phys_addr + sg->offset;
}
#endif

/**
 * sg_chain - Chain two sglists together
 * @prv: First scatterlist
 * @prv_nents: Number of entries in prv
 * @sgl: Second scatterlist
 *
 * Description:
 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
 *
 **/
static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
    struct scatterlist *sgl)
{
	/*
	 * offset and length are unused for a chain entry.  Clear them.
	 */
	struct scatterlist *sg = &prv[prv_nents - 1];

	sg->offset = 0;
	sg->length = 0;

	/*
	 * Indicate a link pointer, and set the link to the second list.
	 */
	sg->flags = SG_CHAIN;
	sg->sl_un.sg = sgl;
}

/**
 * sg_mark_end - Mark the end of the scatterlist
 * @sg: SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table.  A call to sg_next() on this entry will return NULL.
 *
 **/
static inline void sg_mark_end(struct scatterlist *sg)
{
	sg->flags = SG_END;
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sgl[max_ents - 1].sl_un.sg;
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
}
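
/*
 * Illustrative sketch (never compiled, per this file's #if 0 convention):
 * chaining two fixed-size chunks by hand.  This is the same layout that
 * __sg_alloc_table() below builds when @nents exceeds @max_ents, and that
 * __sg_free_table() above walks when tearing the table down.  The function
 * name sg_chain_example is an assumption for the example only.
 */
#if 0
static inline void
sg_chain_example(struct scatterlist *first /* [4] */,
    struct scatterlist *second /* [8] */)
{
	sg_init_table(first, 4);
	sg_init_table(second, 8);

	/*
	 * first[3] becomes a chain entry pointing at second[0], so the
	 * combined list carries 3 + 8 usable entries.  sg_next() follows
	 * the link transparently and returns NULL after second[7], which
	 * sg_init_table() marked with SG_END.
	 */
	sg_chain(first, 4, second);
}
#endif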

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   This function returns a @table @nents long.  The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), M_DRM,
		    gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, __sg_free_table() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end.
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table.  If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
	    gfp_mask);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC);

	return ret;
}

#define for_each_sg(sglist, sg, sgmax, _itr)				\
	for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))

#endif	/* _LINUX_SCATTERLIST_H_ */
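
/*
 * Illustrative usage sketch (never compiled): a hypothetical caller with an
 * array of vm_page pointers builds a table with sg_alloc_table(), fills it
 * with sg_set_page(), walks it with for_each_sg(), and releases it with
 * sg_free_table().  The names example_fill_sg, pages, and npages, and the
 * availability of GFP_KERNEL in this compat layer, are assumptions made
 * only for the example.
 */
#if 0
static inline int
example_fill_sg(struct sg_table *table, struct vm_page **pages, int npages)
{
	struct scatterlist *sg;
	int i, ret;

	ret = sg_alloc_table(table, npages, GFP_KERNEL);
	if (ret)
		return ret;

	/* One full page per entry; chunk chaining is handled internally. */
	for_each_sg(table->sgl, sg, npages, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... hand table->sgl to the consumer, then tear it down: */
	sg_free_table(table);
	return 0;
}
#endif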