Lines matching full:pages (cross-reference hits; the functions annotated below all belong to the driver's GEM backing-pages code, i915_gem_pages.c)
19 struct sg_table *pages) in __i915_gem_object_set_pages() argument
31 /* Make the pages coherent with the GPU (flushing any swapin). */ in __i915_gem_object_set_pages()
36 drm_clflush_sg(pages); in __i915_gem_object_set_pages()
40 obj->mm.get_page.sg_pos = pages->sgl; in __i915_gem_object_set_pages()
42 obj->mm.get_dma_page.sg_pos = pages->sgl; in __i915_gem_object_set_pages()
45 obj->mm.pages = pages; in __i915_gem_object_set_pages()
47 obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl); in __i915_gem_object_set_pages()
55 * 64K or 4K pages, although in practice this will depend on a number of in __i915_gem_object_set_pages()
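The hits above all come from __i915_gem_object_set_pages(), which installs a freshly gathered sg_table on a GEM object: flush the CPU caches if the object is still dirty, restart both cached scatterlist iterators, publish the table, and record the physical page sizes of the backing store. A condensed, hedged sketch (assertions and shrinker bookkeeping omitted; details vary between driver versions):

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	/* Restart both cached lookup iterators at the new table. */
	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	/* Record which physical page sizes back this object. */
	obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
}

The fragment at line 55 belongs to the comment over the step that follows, which derives the page sizes usable for later GTT insertion (4K/64K/2M/1G on capable hardware) from that phys mask, noting that what can actually be used opportunistically depends on other factors.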
116 /* Ensure that the associated pages are gathered from the backing storage
119 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
120 * either as a result of memory pressure (reaping pages under the shrinker)
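Lines 116-120 are from the comment above __i915_gem_object_get_pages() describing the pinning contract: pages are gathered from backing storage and pinned into the object; pins nest, and only after the final i915_gem_object_unpin_pages() may the pages be reaped, whether by the shrinker under memory pressure or when the object itself is released. A hedged usage sketch (the _unlocked pin variant exists in newer trees; the wrapper function is hypothetical):

/* Hypothetical helper showing the pin/use/unpin contract. */
static int with_pages(struct drm_i915_gem_object *obj)
{
	int err;

	/* Gather and pin the backing pages (cheap if already pinned). */
	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		return err;

	/* obj->mm.pages is valid here and safe from the shrinker. */

	/* Drop our pin; the pages may now be reaped. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}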
211 struct sg_table *pages; in __i915_gem_object_unset_pages() local
215 pages = fetch_and_zero(&obj->mm.pages); in __i915_gem_object_unset_pages()
216 if (IS_ERR_OR_NULL(pages)) in __i915_gem_object_unset_pages()
217 return pages; in __i915_gem_object_unset_pages()
235 return pages; in __i915_gem_object_unset_pages()
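__i915_gem_object_unset_pages() is the reverse of set_pages: it detaches the sg_table from the object and returns it, leaving the caller responsible for handing it back to the backend. A hedged sketch of the shape visible in these hits (the teardown between the two returns is summarized in a comment):

static struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	/* Atomically take ownership of the table from the object. */
	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;	/* nothing installed, or an error cookie */

	/* (The real code also invalidates the cached sg iterators,
	 * tears down any kernel vmap and updates shrinker state.) */

	return pages;	/* caller passes this to obj->ops->put_pages() */
}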
240 struct sg_table *pages; in __i915_gem_object_put_pages() local
255 pages = __i915_gem_object_unset_pages(obj); in __i915_gem_object_put_pages()
259 * NULL pages. In the future, when we have more asynchronous in __i915_gem_object_put_pages()
263 if (!IS_ERR_OR_NULL(pages)) in __i915_gem_object_put_pages()
264 obj->ops->put_pages(obj, pages); in __i915_gem_object_put_pages()
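__i915_gem_object_put_pages() is the caller of the above. The IS_ERR_OR_NULL() filter exists because obj->mm.pages can hold an ERR_PTR cookie from a failed get_pages and, as the comment at line 259 notes, the backends have not yet been taught to accept NULL. Hedged sketch (locking elided; the signature has varied across versions):

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;	/* a pin is still outstanding */

	pages = __i915_gem_object_unset_pages(obj);

	/* Filter NULL/ERR_PTR before handing back to the backend. */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}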
274 struct vm_page *stack[32], **pages = stack, *page; in i915_gem_object_map_page() local
286 * vmap) to provide virtual mappings of the high pages. in i915_gem_object_map_page()
302 if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl))) in i915_gem_object_map_page()
303 return page_address(sg_page(obj->mm.pages->sgl)); in i915_gem_object_map_page()
314 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); in i915_gem_object_map_page()
315 if (!pages) in i915_gem_object_map_page()
320 for_each_sgt_page(page, iter, obj->mm.pages) in i915_gem_object_map_page()
321 pages[i++] = page; in i915_gem_object_map_page()
322 vaddr = vmap(pages, n_pages, 0, pgprot); in i915_gem_object_map_page()
323 if (pages != stack) in i915_gem_object_map_page()
324 kvfree(pages); in i915_gem_object_map_page()
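These hits are i915_gem_object_map_page(), which flattens the sg_table into a page array and vmap()s it into kernel space: a 32-entry on-stack array serves small objects, larger ones fall back to kvmalloc_array(), and a single low-memory page skips vmap entirely via page_address(). The struct vm_page at line 274 suggests this listing comes from a BSD port of the driver, where the native struct vm_page stands in for Linux's struct page; the sketch below follows the upstream Linux form (older trees spell the WC pgprot PAGE_KERNEL_IO):

static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	/* Fast path: one low-memory page is already kernel-mapped. */
	if (type == I915_MAP_WB && n_pages == 1 &&
	    !PageHighMem(sg_page(obj->mm.pages->sgl)))
		return page_address(sg_page(obj->mm.pages->sgl));

	pgprot = type == I915_MAP_WC ?
		 pgprot_writecombine(PAGE_KERNEL) : PAGE_KERNEL;

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for the stack -- use a temporary array. */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

The highmem remark at line 286 is part of a longer comment in the source: kmap() of high pages draws from a finite pool and can deadlock waiting for a slot, so high pages go through an explicit vmap, which can fail and report an error instead of waiting forever.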
350 for_each_sgt_daddr(addr, iter, obj->mm.pages) in i915_gem_object_map_pfn()
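The lone hit at line 350 is from i915_gem_object_map_pfn(), the sibling path used when the backing store is I/O memory with no struct page: it walks DMA addresses with for_each_sgt_daddr(), converts them to PFNs relative to the memory region's iomap, and maps them with vmap_pfn(). Hedged sketch along the same stack-or-heap pattern:

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
				obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	if (type != I915_MAP_WC)	/* only WC makes sense for iomem */
		return ERR_PTR(-ENODEV);

	if (n_pfn > ARRAY_SIZE(stack)) {
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;

	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}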
359 /* get, pin, and map the pages of the object into kernel space */
403 * pages should be allocated and mapped as write-combined only. in i915_gem_object_pin_map()
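Line 359 is the summary comment over i915_gem_object_pin_map(), which bundles the whole sequence: get and pin the pages, build (and cache in obj->mm.mapping) a kernel mapping of the requested type, and return the address. The fragment at line 403 is from a comment noting that objects without struct-page backing should be allocated and mapped write-combined only. A hedged usage sketch (the _unlocked wrapper exists in newer trees; the wrapper function is hypothetical):

/* Hypothetical helper showing the pin_map lifecycle. */
static int clear_object_cpu(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	/* Pin the pages and map them write-combined in one call. */
	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->base.size);	/* CPU access via the vmap */

	/* Drops the pin; the mapping itself stays cached on the object. */
	i915_gem_object_unpin_map(obj);
	return 0;
}

That cached mapping is why the comment at line 506, in __i915_gem_object_release_map(), can speak of removing the mapping from underneath pinned pages: the pin protects the pages, not the vmap, and the source reserves that teardown for construction-time use.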
506 * We allow removing the mapping from underneath pinned pages! in __i915_gem_object_release_map()
562 * individual pages from this range, cancel updating the in __i915_gem_object_page_iter_get_sg()
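The final hit, line 562, falls in __i915_gem_object_page_iter_get_sg(), the cached scatterlist walk behind i915_gem_object_get_page() and i915_gem_object_get_dma_address(): lookups resume from the previously found position (the sg_pos/sg_idx fields initialized in set_pages above), so mostly sequential access avoids rescanning from the head of the list. Per the fragment shown, the surrounding comment covers the case where the backing store can remove individual pages from a range, in which case updating the cached position is cancelled so later lookups rescan safely. Hedged usage sketch of the public accessors:

	/* Hypothetical lookup: n is a page index within the object;
	 * amortized O(1) for sequential n thanks to the cached iterator. */
	struct page *page = i915_gem_object_get_page(obj, n);
	dma_addr_t addr = i915_gem_object_get_dma_address(obj, n);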