// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM_0] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

static int __iopagetest(struct intel_memory_region *mem,
			u8 __iomem *va, int pagesize,
			u8 value, resource_size_t offset,
			const void *caller)
{
	int byte = get_random_u32_below(pagesize);
	u8 result[3];

	memset_io(va, value, pagesize); /* or GPF! */
	wmb();

	result[0] = ioread8(va);
	result[1] = ioread8(va + byte);
	result[2] = ioread8(va + pagesize - 1);
	if (memchr_inv(result, value, sizeof(result))) {
		dev_err(mem->i915->drm.dev,
			"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
			&mem->region, &mem->io.start, &offset, caller,
			value, result[0], result[1], result[2]);
		return -EINVAL;
	}

	return 0;
}

static int iopagetest(struct intel_memory_region *mem,
		      resource_size_t offset,
		      const void *caller)
{
	/* XXX the ioremap_wc() path below is not yet wired up in this port */
	STUB();
	return -ENOSYS;
#ifdef notyet
	const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
	void __iomem *va;
	int err;
	int i;

	va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
	if (!va) {
		dev_err(mem->i915->drm.dev,
			"Failed to ioremap memory region [%pa + %pa] for %ps\n",
			&mem->io.start, &offset, caller);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
		if (err)
			break;

		err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
		if (err)
			break;
	}

	iounmap(va);
	return err;
#endif
}

static resource_size_t random_page(resource_size_t last)
{
	/* Limited to low 44b (16TiB), but should suffice for a spot check */
	return get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
}
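/*
 * Worked example of the arithmetic above: with 4 KiB pages and, say, a
 * 256 MiB aperture, last = 256 MiB - 4 KiB = 0xffff000, so random_page()
 * draws one of 65535 page-aligned offsets in [0, last - PAGE_SIZE]. Note
 * that @last itself is never drawn; iomemtest() below covers the final
 * page with an explicit probe instead.
 */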
static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	if (resource_size(&mem->io) < PAGE_SIZE)
		return 0;

	last = resource_size(&mem->io) - PAGE_SIZE;

	/*
	 * Quick test to check read/write access to the iomap (backing store).
	 *
	 * Write a byte, read it back. If the iomapping fails, we expect
	 * a GPF preventing further execution. If the backing store does not
	 * exist, the read back will return garbage. We check a couple of
	 * pages, the first and last of the specified region, to confirm that
	 * the backing store + iomap covers the entire memory region; and we
	 * check a random offset within as a quick spot check for bad memory.
	 */

	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}

struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}

void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa bytes\n", &mr->total);
}

static int intel_memory_region_memtest(struct intel_memory_region *mem,
				       void *caller)
{
	struct drm_i915_private *i915 = mem->i915;
	int err = 0;

	if (!mem->io.start)
		return 0;

	/* Spot check on debug builds; i915.memtest=1 tests every page */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
		err = iomemtest(mem, i915->params.memtest, caller);

	return err;
}

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   resource_size_t io_size,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = DEFINE_RES_MEM(start, size);
	mem->io = DEFINE_RES_MEM(io_start, io_size);
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->type = type;
	mem->instance = instance;

	rw_init(&mem->objects.lock, "memobj");
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
	if (err)
		goto err_release;

	return mem;

err_release:
	if (mem->ops->release)
		mem->ops->release(mem);
err_free:
	kfree(mem);
	return ERR_PTR(err);
}
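/*
 * Illustrative usage sketch (&my_region_ops is hypothetical; the real
 * callers are the region setup helpers, e.g. i915_gem_shmem_setup()):
 *
 *	struct intel_memory_region *mem;
 *
 *	mem = intel_memory_region_create(i915, 0, SZ_1G, PAGE_SIZE,
 *					 io_start, SZ_256M,
 *					 INTEL_MEMORY_LOCAL, 0,
 *					 &my_region_ops);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *	intel_memory_region_set_name(mem, "local%u", 0);
 *
 * If ops->init() or the memtest fails, the region is torn down and an
 * ERR_PTR() returned, so callers only ever see a fully initialised region.
 */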
void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

void intel_memory_region_avail(struct intel_memory_region *mr,
			       u64 *avail, u64 *visible_avail)
{
	if (mr->type == INTEL_MEMORY_LOCAL) {
		i915_ttm_buddy_man_avail(mr->region_private,
					 avail, visible_avail);
		*avail <<= PAGE_SHIFT;
		*visible_avail <<= PAGE_SHIFT;
	} else {
		*avail = mr->total;
		*visible_avail = mr->total;
	}
}

void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int ret = 0;

	if (mem->ops->release)
		ret = mem->ops->release(mem);

#ifdef notyet
	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
#endif
	mutex_destroy(&mem->objects.lock);
	if (!ret)
		kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = i915->mm.regions[i];
		u64 region_size, io_size;

		if (!mem)
			continue;

		region_size = resource_size(&mem->region) >> 20;
		io_size = resource_size(&mem->io) >> 20;

		if (resource_size(&mem->io))
			drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
				mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
		else
			drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
				mem->id, mem->name, region_size, &mem->region);
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_destroy(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif
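/*
 * With CONFIG_DRM_I915_SELFTEST enabled, the includes above compile the mock
 * and live memory-region tests into this unit; upstream they are typically
 * run by loading i915 with the mock_selftests=-1 / live_selftests=-1 module
 * parameters, though the exact invocation may differ in this port.
 */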