/*	$NetBSD: huge_gem_object.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: huge_gem_object.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "i915_scatterlist.h"

#include "huge_gem_object.h"

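/*
 * Only the first nreal scatterlist entries point at distinct pages; the
 * tail entries alias them, so stop freeing once the real pages are gone.
 */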
static void huge_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	unsigned long nreal = obj->scratch / PAGE_SIZE;
	struct sgt_iter sgt_iter;
	struct page *page;

	for_each_sgt_page(page, sgt_iter, pages) {
		__free_page(page);
		if (!--nreal)
			break;
	}

	sg_free_table(pages);
	kfree(pages);
}

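/*
 * Populate obj->base.size worth of scatterlist entries while allocating
 * only obj->scratch bytes of real pages, cycling through the real pages
 * to back the remainder.
 */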
static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	const unsigned long nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct scatterlist *sg, *src, *end;
	struct sg_table *pages;
	unsigned long n;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	if (sg_alloc_table(pages, npages, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg = pages->sgl;
	for (n = 0; n < nreal; n++) {
		struct page *page;

		page = alloc_page(GFP | __GFP_HIGHMEM);
		if (!page) {
			sg_mark_end(sg);
			goto err;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = __sg_next(sg);
	}
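	/* Alias the remaining entries onto the real pages, round-robin. */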
	if (nreal < npages) {
		for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
			sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
			src = __sg_next(src);
			if (src == end)
				src = pages->sgl;
		}
	}

	if (i915_gem_gtt_prepare_pages(obj, pages))
		goto err;

	__i915_gem_object_set_pages(obj, pages, PAGE_SIZE);

	return 0;

err:
	huge_free_pages(obj, pages);
	return -ENOMEM;
#undef GFP
}

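/* Release the DMA mapping and the (partially aliased) page array. */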
static void huge_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_free_pages(obj, pages);

	obj->mm.dirty = false;
}

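/* Otherwise treated as an ordinary shrinkable, struct-page-backed object. */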
static const struct drm_i915_gem_object_ops huge_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = huge_get_pages,
	.put_pages = huge_put_pages,
};

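/*
 * Create a GEM object that spans dma_size bytes of address space while
 * allocating only phys_size bytes of real pages, so tests can exercise
 * huge mappings without the matching memory cost.
 */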
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
		phys_addr_t phys_size,
		dma_addr_t dma_size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!phys_size || phys_size > dma_size);
	GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));

	if (overflows_type(dma_size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
	i915_gem_object_init(obj, &huge_ops, &lock_class);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);
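	/* Stash the real backing size for huge_get_pages()/huge_free_pages(). */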
	obj->scratch = phys_size;

	return obj;
}