/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef I915_SCATTERLIST_H
#define I915_SCATTERLIST_H

#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#include "i915_gem.h"

struct drm_mm_node;
struct ttm_resource;

/*
 * Optimised SGL iterator for GEM objects
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;
		dma_addr_t dma;
	};
	unsigned int curr;
	unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
		s.sgp = NULL;
	} else if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		if (dma) {
			s.dma = sg_dma_address(s.sgp);
			s.max += sg_dma_len(s.sgp);
		} else {
			s.pfn = page_to_pfn(sg_page(s.sgp));
			s.max += s.sgp->length;
		}
	}

	return s;
}

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline int __sg_dma_page_count(const struct scatterlist *sg)
{
	return sg_dma_len(sg) >> PAGE_SHIFT;
}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg@+1). If that's a chain pointer, follow it;
 *   otherwise just return the pointer to the current element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}

/**
 * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
 * @__dp: Device address (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 * @__step: step size
 */
#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step)		\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)

/**
 * __for_each_daddr_next - iterate over the device addresses with a pre-initialized iterator.
 * @__dp: Device address (output)
 * @__iter: 'struct sgt_iter' (iterator state, external)
 * @__step: step size
 */
#define __for_each_daddr_next(__dp, __iter, __step)			\
	for (; ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp: page pointer (output)
 * @__iter: 'struct sgt_iter' (iterator state, internal)
 * @__sgt: sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
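
/*
 * Example (illustrative sketch only, not part of the original header):
 * count the CPU pages backing an sg_table by walking it with
 * for_each_sgt_page(). The helper name is hypothetical; it assumes the
 * table was built with PAGE_SIZE granularity, as the iterators above do.
 */
static inline unsigned long i915_sgt_count_pages_example(struct sg_table *sgt)
{
	struct sgt_iter iter;
	struct page *page;
	unsigned long n = 0;

	/* The loop condition yields a NULL page once the table is exhausted. */
	for_each_sgt_page(page, iter, sgt)
		n++;

	return n;
}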

/**
 * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist
 * @sg: The scatterlist
 *
 * Return: An unsigned int with segment sizes logically or'ed together.
 * A caller can use this information to determine what hardware page table
 * entry sizes can be used to map the memory represented by the scatterlist.
 */
static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg && sg_dma_len(sg)) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE));
		page_sizes |= sg_dma_len(sg);
		sg = __sg_next(sg);
	}

	return page_sizes;
}

#ifdef __linux__
static inline unsigned int i915_sg_segment_size(struct device *dev)
{
	size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev));

	/*
	 * For Xen PV guests pages aren't contiguous in DMA (machine) address
	 * space. The DMA API takes care of that both in dma_alloc_* (by
	 * calling into the hypervisor to make the pages contiguous) and in
	 * dma_map_* (by bounce buffering). But i915 ignores the coherency
	 * aspects of the DMA API and thus can't cope with bounce buffering
	 * actually happening, so add a hack here to force small allocations
	 * and mappings when running in PV mode on Xen.
	 *
	 * Note this will still break if bounce buffering is required for other
	 * reasons, like confidential computing hypervisors or PCIe root ports
	 * with addressing limitations.
	 */
	if (xen_pv_domain())
		max = PAGE_SIZE;
	return round_down(max, PAGE_SIZE);
}
#else
static inline unsigned int i915_sg_segment_size(struct device *dev)
{
	return PAGE_SIZE;
}
#endif

bool i915_sg_trim(struct sg_table *orig_st);

/**
 * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt
 */
struct i915_refct_sgt_ops {
	/**
	 * @release: Free the memory of the struct i915_refct_sgt
	 */
	void (*release)(struct kref *ref);
};

/**
 * struct i915_refct_sgt - A refcounted scatter-gather table
 * @kref: struct kref for refcounting
 * @table: struct sg_table holding the scatter-gather table itself. Note that
 * @table->sgl = NULL can be used to determine whether a scatter-gather table
 * is present or not.
 * @size: The size in bytes of the underlying memory buffer
 * @ops: The operations structure.
 */
struct i915_refct_sgt {
	struct kref kref;
	struct sg_table table;
	size_t size;
	const struct i915_refct_sgt_ops *ops;
};

/**
 * i915_refct_sgt_put - Put a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to put.
 */
static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
{
	if (rsgt)
		kref_put(&rsgt->kref, rsgt->ops->release);
}

/**
 * i915_refct_sgt_get - Get a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to get.
 */
static inline struct i915_refct_sgt *
i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
{
	kref_get(&rsgt->kref);
	return rsgt;
}

/**
 * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom
 * operations structure
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: Size in bytes of the underlying memory buffer.
 * @ops: A customized operations structure in case the refcounted sg-list
 * is embedded into another structure.
 */
static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
					 size_t size,
					 const struct i915_refct_sgt_ops *ops)
{
	kref_init(&rsgt->kref);
	rsgt->table.sgl = NULL;
	rsgt->size = size;
	rsgt->ops = ops;
}
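
/*
 * Usage sketch for the custom-ops variant above (illustrative only; the
 * example_* names are hypothetical and appear nowhere in the driver).
 * Embedding the refcounted sg-table lets @release free the containing
 * object once the last reference is dropped:
 *
 *	struct example_holder {
 *		struct i915_refct_sgt rsgt;
 *	};
 *
 *	static void example_release(struct kref *ref)
 *	{
 *		struct example_holder *h =
 *			container_of(ref, struct example_holder, rsgt.kref);
 *
 *		if (h->rsgt.table.sgl)
 *			sg_free_table(&h->rsgt.table);
 *		kfree(h);
 *	}
 *
 *	static const struct i915_refct_sgt_ops example_ops = {
 *		.release = example_release,
 *	};
 *
 *	__i915_refct_sgt_init(&h->rsgt, size, &example_ops);
 */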

void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);

struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start,
					      u32 page_alignment);

struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start,
						     u32 page_alignment);

#endif