/*	$NetBSD: i915_gem_pages.c,v 1.7 2024/01/19 22:22:54 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_pages.c,v 1.7 2024/01/19 22:22:54 riastradh Exp $");

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

#ifdef __NetBSD__
#include <sys/param.h>
#include <uvm/uvm_extern.h>
#endif

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

#ifdef __NetBSD__
	/*
	 * Paranoia: In NetBSD, a scatterlist is just an array of
	 * pages, not an array of segments that might be larger than
	 * pages, so the number of entries must exactly match the size
	 * of the object (which should also be page-aligned).
	 *
	 * Both vm_fault_cpu and i915_gem_object_release_mmap_offset in
	 * i915_gem_mman.c rely on this page array as such.
	 */
	KASSERTMSG(pages->sgl->sg_npgs == obj->base.size >> PAGE_SHIFT,
	    "npgs=%zu size=%zu", pages->sgl->sg_npgs, obj->base.size);
#else
	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
#endif

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
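	/*
	 * Note that phys & (~0u << i) is nonzero exactly when some bit at
	 * or above i is set in phys, i.e. when at least one chunk of 2^i
	 * bytes or larger is present, so bit i of page_sizes.sg is set for
	 * each supported page size that could fit in the backing store.
	 */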
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

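		/*
		 * Register the object with the shrinker: objects already
		 * marked DONTNEED go on the purge list, everything else on
		 * the ordinary shrink list.
		 */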
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
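/*
 * Illustrative caller pattern (a sketch only, using the pin/unpin wrappers
 * from i915_gem_object.h rather than these internal helpers):
 *
 *	if (i915_gem_object_pin_pages(obj) == 0) {
 *		struct page *page = i915_gem_object_get_page(obj, 0);
 *		... use page; the shrinker cannot reap it while pinned ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */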
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

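		/*
		 * Publish the freshly installed pages before the pin count
		 * can be observed nonzero (e.g. by the atomic_inc_not_zero()
		 * fast path in i915_gem_object_pin_pages()).
		 */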
		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
#ifdef __NetBSD__
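	/*
	 * Undo i915_gem_object_map(): unmap the wired pages and release
	 * the contiguous KVA region that was allocated for the mapping.
	 */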
	pmap_kremove((vaddr_t)ptr, obj->base.size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)ptr, obj->base.size, UVM_KMF_VAONLY);
#else
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
	else
		kunmap(kmap_to_page(ptr));
#endif
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(atomic_read(&obj->bind_count));

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

#ifndef __NetBSD__
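/*
 * Build a kernel PTE for device memory at base + offset; used by
 * i915_gem_object_map() below for objects without struct page backing
 * (e.g. objects in an I/O memory region).
 */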
static inline pte_t iomap_pte(resource_size_t base,
			      dma_addr_t offset,
			      pgprot_t prot)
{
	return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
}
#endif

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
#ifdef __NetBSD__
	vaddr_t va;
	struct page *page;
	paddr_t pa;
	unsigned i;
	int kmflags = UVM_KMF_VAONLY|UVM_KMF_WAITVA;
	int prot = VM_PROT_READ|VM_PROT_WRITE;
	int flags = 0;

	/*
	 * XXX Be nice if we had bus_dmamem segments so we could use
	 * bus_dmamem_map, but we don't so we can't.
	 */

	/* Verify the object is reasonable to map.  */
	/* XXX sync with below */
	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
		return NULL;

	/* Incorporate mapping type into pmap flags.  */
	switch (type) {
	case I915_MAP_WC:
		flags |= PMAP_WRITE_COMBINE;
		break;
	case I915_MAP_WB:
	default:
		break;
	}

	/* Allow failure if >1 page.  */
	if (obj->base.size > PAGE_SIZE)
		kmflags |= UVM_KMF_CANFAIL;

	/* Allocate a contiguous chunk of KVA.  */
	va = uvm_km_alloc(kernel_map, obj->base.size, PAGE_SIZE, kmflags);
	if (va == 0)
		return NULL;

	/* Wire the KVA to the right physical addresses.  */
	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
		page = obj->mm.pages->sgl->sg_pgs[i];
		pa = VM_PAGE_TO_PHYS(&page->p_vmp);
		pmap_kenter_pa(va + i*PAGE_SIZE, pa, prot, flags);
	}
	pmap_update(pmap_kernel());

	return (void *)va;
#else
	unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	pte_t *stack[32], **mem;
	struct vm_struct *area;
	pgprot_t pgprot;

	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
		return NULL;

	/* A single page can always be kmapped */
	if (n_pte == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	area = alloc_vm_area(obj->base.size, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough - to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (i915_gem_object_has_struct_page(obj)) {
		struct sgt_iter iter;
		struct page *page;
		pte_t **ptes = mem;

		for_each_sgt_page(page, iter, sgt)
			**ptes++ = mk_pte(page, pgprot);
	} else {
		resource_size_t iomap;
		struct sgt_iter iter;
		pte_t **ptes = mem;
		dma_addr_t addr;

		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;

		for_each_sgt_daddr(addr, iter, sgt)
			**ptes++ = iomap_pte(iomap, addr, pgprot);
	}

	if (mem != stack)
		kvfree(mem);

	return area->addr;
#endif
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

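	/*
	 * I915_MAP_OVERRIDE allows an existing mapping of a different type
	 * to be torn down and replaced even though other users may already
	 * hold a pin; without it a type mismatch below fails with -EBUSY.
	 */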
	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

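/*
 * Flush the CPU cache for (part of) a pinned kernel mapping of the object
 * so that the GPU will observe writes made through that mapping; this is
 * skipped when the object is cache-coherent for writes or when the mapping
 * is write-combining.
 */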
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

#ifndef __NetBSD__
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}
#endif

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
#ifdef __NetBSD__
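	/*
	 * On NetBSD the scatterlist is a flat page array (see the
	 * KASSERTMSG in __i915_gem_object_set_pages above), so page n can
	 * be indexed directly without the radix-tree iterator.
	 */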
	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
	KASSERT(n < obj->mm.pages->sgl->sg_npgs);
	return obj->mm.pages->sgl->sg_pgs[n];
#else
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
#endif
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
#ifdef __NetBSD__
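	/*
	 * Walk the bus_dma segment list to find the segment containing
	 * byte offset n << PAGE_SHIFT, and return its device address at
	 * that offset along with the number of contiguous bytes remaining
	 * in the segment.
	 */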
	bus_dmamap_t map = obj->mm.pages->sgl->sg_dmamap;
	bus_addr_t poff = (bus_addr_t)n << PAGE_SHIFT;
	unsigned seg;

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (poff < map->dm_segs[seg].ds_len) {
			if (len)
				*len = map->dm_segs[seg].ds_len - poff;
			return map->dm_segs[seg].ds_addr + poff;
		}
		poff -= map->dm_segs[seg].ds_len;
	}
	KASSERT(0);
	return 0;
#else
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
#endif
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}