/* xref: /openbsd-src/sys/dev/pci/drm/i915/intel_memory_region.c (revision f327808bd62af73d349febeb7cc1a555012cbf01) */
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM_0] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

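/*
 * Fill one page of the iomapping with @value, then read back the first
 * byte, one random byte and the last byte to verify that the backing
 * store is present and returns what was written.
 */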
static int __iopagetest(struct intel_memory_region *mem,
			u8 __iomem *va, int pagesize,
			u8 value, resource_size_t offset,
			const void *caller)
{
	int byte = get_random_u32_below(pagesize);
	u8 result[3];

	memset_io(va, value, pagesize); /* or GPF! */
	wmb();

	result[0] = ioread8(va);
	result[1] = ioread8(va + byte);
	result[2] = ioread8(va + pagesize - 1);
	if (memchr_inv(result, value, sizeof(result))) {
		dev_err(mem->i915->drm.dev,
			"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
			&mem->region, &mem->io.start, &offset, caller,
			value, result[0], result[1], result[2]);
		return -EINVAL;
	}

	return 0;
}

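/*
 * Map one page of the region at @offset and exercise it with several
 * test patterns (each value and its complement). Not yet ported to
 * OpenBSD: the upstream body is kept under "notyet" and the function
 * currently just returns -ENOSYS.
 */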
static int iopagetest(struct intel_memory_region *mem,
		      resource_size_t offset,
		      const void *caller)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
	void __iomem *va;
	int err;
	int i;

	va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
	if (!va) {
		dev_err(mem->i915->drm.dev,
			"Failed to ioremap memory region [%pa + %pa] for %ps\n",
			&mem->io.start, &offset, caller);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
		if (err)
			break;

		err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
		if (err)
			break;
	}

	iounmap(va);
	return err;
#endif
}

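/*
 * Pick a page-aligned offset at random below @last for the interior
 * spot check.
 */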
static resource_size_t random_page(resource_size_t last)
{
	/*
	 * Limited to low 44b (16TiB), but should suffice for a spot check.
	 * The cast widens the random page number before the shift so that
	 * offsets above 4GiB are not truncated in 32-bit arithmetic.
	 */
	return (resource_size_t)get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
}

static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	if (resource_size(&mem->io) < PAGE_SIZE)
		return 0;

	last = resource_size(&mem->io) - PAGE_SIZE;

	/*
	 * Quick test to check read/write access to the iomap (backing store).
	 *
	 * Write a byte, read it back. If the iomapping fails, we expect
	 * a GPF preventing further execution. If the backing store does not
	 * exist, the read back will return garbage. We check a couple of
	 * pages, the first and last of the specified region, to confirm that
	 * the backing store + iomap covers the entire memory region; and we
	 * check a random offset within as a quick spot check for bad memory.
	 */

	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}

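/**
 * intel_memory_region_lookup - Find a region by class and instance
 * @i915: The i915 device.
 * @class: Memory class (e.g. INTEL_MEMORY_SYSTEM or INTEL_MEMORY_LOCAL).
 * @instance: Instance of that class.
 *
 * Return: The matching region, or NULL if none is registered.
 */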
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}

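/**
 * intel_memory_region_by_type - Find the first region of a given type
 * @i915: The i915 device.
 * @mem_type: The memory type to look for.
 *
 * Return: The first matching region, or NULL if none is registered.
 */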
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}

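/*
 * Print a summary of the region to @printer: the attached TTM resource
 * manager's usage if one exists, otherwise just the total size.
 */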
void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa bytes\n", &mr->total);
}

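/*
 * Sanity check the region's iomap at creation time. The test only runs
 * when the region has a CPU-accessible I/O range (mem->io.start is set)
 * and either CONFIG_DRM_I915_DEBUG_GEM is enabled or the i915.memtest
 * module parameter is set; with memtest set, every page of the region
 * is exercised instead of a spot check.
 */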
static int intel_memory_region_memtest(struct intel_memory_region *mem,
				       void *caller)
{
	struct drm_i915_private *i915 = mem->i915;
	int err = 0;

	if (!mem->io.start)
		return 0;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
		err = iomemtest(mem, i915->params.memtest, caller);

	return err;
}

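/**
 * intel_memory_region_create - Allocate and initialize a memory region
 * @i915: The i915 device.
 * @start: Start of the region in its address space.
 * @size: Size of the region.
 * @min_page_size: Minimum page (allocation chunk) size for the region.
 * @io_start: Start of the region's CPU-visible I/O range, if any.
 * @io_size: Size of that I/O range.
 * @type: Memory type (class) of the region.
 * @instance: Instance of that type.
 * @ops: Region operations; ops->init() is called here if provided.
 *
 * The freshly created region is also memtested, see
 * intel_memory_region_memtest().
 *
 * Return: The new region, or an ERR_PTR on failure.
 */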
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   resource_size_t io_size,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = DEFINE_RES_MEM(start, size);
	mem->io = DEFINE_RES_MEM(io_start, io_size);
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->type = type;
	mem->instance = instance;

	rw_init(&mem->objects.lock, "memobj");
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
	if (err)
		goto err_release;

	return mem;

err_release:
	if (mem->ops->release)
		mem->ops->release(mem);
err_free:
	kfree(mem);
	return ERR_PTR(err);
}

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

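/*
 * Report how much of the region is unallocated, in bytes. Local memory
 * is tracked by the TTM buddy manager in pages, so its counts are
 * shifted up to bytes; any other region type reports its full size.
 * @visible_avail is the CPU-visible share as returned by the buddy
 * manager (identical to @avail for non-local regions).
 */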
void intel_memory_region_avail(struct intel_memory_region *mr,
			       u64 *avail, u64 *visible_avail)
{
	if (mr->type == INTEL_MEMORY_LOCAL) {
		i915_ttm_buddy_man_avail(mr->region_private,
					 avail, visible_avail);
		*avail <<= PAGE_SHIFT;
		*visible_avail <<= PAGE_SHIFT;
	} else {
		*avail = mr->total;
		*visible_avail = mr->total;
	}
}

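/*
 * Release the region's resources via ops->release() and free it; if the
 * release callback returns an error, the region is left allocated.
 */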
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int ret = 0;

	if (mem->ops->release)
		ret = mem->ops->release(mem);

#ifdef notyet
	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
#endif
	mutex_destroy(&mem->objects.lock);
	if (!ret)
		kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

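/*
 * Probe each region the device claims to have (HAS_REGION) and set it
 * up through the matching backend: shmem or TTM for system memory, the
 * stolen helpers for the stolen variants; types not handled here are
 * skipped. A summary of every region created is then logged.
 */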
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = i915->mm.regions[i];
		u64 region_size, io_size;

		if (!mem)
			continue;

		region_size = resource_size(&mem->region) >> 20;
		io_size = resource_size(&mem->io) >> 20;

		if (resource_size(&mem->io))
			drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
				mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
		else
			drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
				mem->id, mem->name, region_size, &mem->region);
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

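/*
 * Tear down all registered regions, clearing each slot before the
 * region itself is destroyed.
 */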
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_destroy(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif