/*	$NetBSD: i915_gem_stolen.c,v 1.7 2024/01/19 22:24:38 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_stolen.c,v 1.7 2024/01/19 22:24:38 riastradh Exp $");

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"

#include <linux/nbsd-namespace.h>

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

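/*
 * Allocate and free ranges in the stolen drm_mm range manager,
 * serialized by mm.stolen_lock.  Allocation fails with -ENODEV if
 * stolen memory was never initialized (or was disabled).
 */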
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node, size,
						    alignment, 0, U64_MAX);
}

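/*
 * Illustrative use of the allocate/free pair (a sketch only; real
 * callers typically embed the node in a longer-lived structure):
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, 64 * 1024, 4096);
 *	if (err)
 *		return err;
 *	...use the range [node.start, node.start + node.size)...
 *	i915_gem_stolen_remove_node(i915, &node);
 */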
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

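/*
 * Trim the stolen region so it cannot collide with a GTT that lives
 * inside stolen memory on old (gen4 and earlier) platforms, and, on
 * Linux, reserve the range in the resource tree so that nothing else
 * claims the same physical addresses.
 */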
static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (IS_GEN(i915, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
		}
	}

#ifdef __NetBSD__		/* XXX */
	__USE(r);
#else
	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && !IS_GEN(i915, 3)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);

			return -EBUSY;
		}
	}
#endif

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	mutex_destroy(&i915->mm.stolen_lock);
	drm_mm_takedown(&i915->mm.stolen);
}

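/*
 * The *_get_stolen_reserved() helpers below decode the per-platform
 * STOLEN_RESERVED register into the base and size of the chunk of
 * stolen memory reserved for hardware/firmware use, which must be kept
 * out of the driver's allocator.
 */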
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016"PRIx64"\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

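/*
 * Discover the stolen region, trim it with i915_adjust_stolen(), carve
 * out the hardware-reserved portion at its top, and initialize a
 * drm_mm allocator over the remaining usable space.
 */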
static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		dev_notice(i915->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
		dev_notice(i915->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	i915->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(i915));
		/* fall through */
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of
	 * stolen memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %"PRIu64"K, usable: %"PRIu64"K\n",
			 (u64)resource_size(&i915->dsm) >> 10,
			 ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}

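/*
 * Wrap a contiguous range of stolen memory in an sg_table.  On Linux a
 * single scatterlist entry with a faked dma address is enough; on
 * NetBSD we instead build page-sized bus_dma segments and load them
 * into a dmamap attached to the scatterlist.
 */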
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;
#ifdef __NetBSD__
	bus_dma_tag_t dmat = i915->drm.dmat;
	bus_dma_segment_t *seg = NULL;
	int nseg = 0, i;
	bool loaded = false;
	int ret;
#endif

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

#ifdef __NetBSD__
	KASSERT((size % PAGE_SIZE) == 0);
	nseg = size / PAGE_SIZE;
	seg = kmem_alloc(nseg * sizeof(seg[0]), KM_SLEEP);

	/*
	 * XXX x86 bus_dmamap_load_raw fails to respect the maxsegsz we
	 * pass to bus_dmamap_create, so we have to create page-sized
	 * segments to begin with.
	 */
	for (i = 0; i < nseg; i++) {
		seg[i].ds_addr = (bus_addr_t)i915->dsm.start + offset +
		    i*PAGE_SIZE;
		seg[i].ds_len = PAGE_SIZE;
	}

	sg = NULL;

	ret = sg_alloc_table_from_bus_dmamem(st, dmat, seg, nseg, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sg table for stolen object: %d\n",
		    ret);
		ret = -ENOMEM;
		goto out;
	}
	sg = st->sgl;

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
	if (ret) {
		DRM_ERROR("failed to create DMA map for stolen object: %d\n",
		    ret);
		st->sgl->sg_dmamap = NULL;
		goto out;
	}
	st->sgl->sg_dmat = dmat;

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_load_raw(dmat, st->sgl->sg_dmamap, seg, nseg, size,
	    BUS_DMA_WAITOK);
	if (ret) {
		DRM_ERROR("failed to load DMA map for stolen object: %d\n",
		    ret);
		goto out;
	}
	loaded = true;

out:	kmem_free(seg, nseg * sizeof(seg[0]));
	if (ret) {
		if (loaded)
			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
		if (sg && sg->sg_dmamap)
			bus_dmamap_destroy(dmat, sg->sg_dmamap);
		if (sg)
			sg_free_table(st);
		kfree(st);
		return ERR_PTR(ret);
	}
#else
	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;
#endif

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
#ifdef __NetBSD__
	bus_dmamap_unload(obj->base.dev->dmat, pages->sgl->sg_dmamap);
#endif
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_object_release_memory_region(obj);

	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

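/*
 * Construct a GEM object around an already-allocated stolen node.  On
 * success the object takes ownership of the node and returns it to the
 * allocator through i915_gem_object_release_stolen() when it dies.
 */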
static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
				struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}

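/*
 * Example (a sketch only): allocating a 4 MiB contiguous object from
 * stolen memory, e.g. for a scratch framebuffer:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen(i915, 4 << 20);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */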
static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}

static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};

struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}

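/*
 * Wrap a range of stolen memory that firmware has already populated
 * (e.g. the BIOS framebuffer) in a GEM object and, unless gtt_offset is
 * I915_GTT_OFFSET_NONE, bind it at a fixed GGTT offset so that KMS can
 * take over the display contents untouched.
 */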
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return ERR_PTR(ret);
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj)) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(i915, stolen);
		kfree(stolen);
		return obj;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/*
	 * To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	mutex_lock(&ggtt->vm.mutex);
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		mutex_unlock(&ggtt->vm.mutex);
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	GEM_BUG_ON(vma->pages);
	vma->pages = obj->mm.pages;
	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);

	set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
	__i915_vma_set_map_and_fenceable(vma);

	list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
	atomic_inc(&obj->bind_count);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}