/*	$NetBSD: nouveau_ttm.c,v 1.7 2018/08/27 07:51:06 riastradh Exp $	*/

/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_ttm.c,v 1.7 2018/08/27 07:51:06 riastradh Exp $");

#include <sys/param.h>
#include <uvm/uvm_extern.h>	/* pmap_pv_track/untrack */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

#include <core/tegra.h>

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	man->priv = fb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	nvkm_mem_node_cleanup(mem->mm_node);
	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->device.info.ram_size == 0)
		return -ENOMEM;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
			     mem->page_alignment << PAGE_SHIFT, size_nc,
			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
};
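/*
 * TTM_PL_TT (GART) manager.  Nothing is reserved at allocation time: the
 * nvkm_mem node only records the page shift and the memory-type bits
 * derived from the object's tile flags, and mem->start is left at zero.
 */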
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		break;
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
			drm->device.info.family);
		break;
	}

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};

/*XXX*/
#include <subdev/mmu/nv04.h>
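/*
 * nv04-class GART manager.  The manager holds a reference to the nv04
 * MMU's address space in man->priv; each allocation grabs a read/write
 * range from that space with nvkm_vm_get() and releases it on free.
 */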
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};
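/*
 * mmap entry points.  Offsets below DRM_FILE_PAGE_OFFSET are handed to
 * the legacy DRM map code; anything at or above it is resolved to a TTM
 * buffer object.  The NetBSD variant hands back a uvm_object and offset
 * rather than filling in a Linux vm_area_struct.
 */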
#ifdef __NetBSD__

int
nouveau_ttm_mmap_object(struct drm_device *dev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	struct nouveau_drm *const drm = nouveau_drm(dev);

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));

	if (__predict_false((offset >> PAGE_SHIFT) < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap_object(dev, offset, size, prot, uobjp,
		    uoffsetp, file);
	else
		return ttm_bo_mmap_object(&drm->ttm.bdev, offset, size, prot,
		    uobjp, uoffsetp, file);
}

#else

int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

#endif

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nvkm_pci *pci = device->pci;
	struct drm_device *dev = drm->dev;
	u8 bits;
	int ret;

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nvxx_device(&drm->device)->func->pci) {
		if (drm->agp.bridge)
			bits = 32;
	} else if (device->func->tegra) {
		struct nvkm_device_tegra *tegra = device->func->tegra(device);

		/*
		 * If the platform can use an IOMMU, then the addressable DMA
		 * space is constrained by the IOMMU bit
		 */
		if (tegra->func->iommu_bit)
			bits = min(bits, tegra->func->iommu_bit);

	}

#ifdef __NetBSD__
	ret = drm_limit_dma_space(dev, 0, DMA_BIT_MASK(bits));
#else
	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
#endif
	if (ret && bits != 32) {
		bits = 32;
#ifdef __NetBSD__
		ret = drm_limit_dma_space(dev, 0, DMA_BIT_MASK(bits));
#else
		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
#endif
	}
	if (ret)
		return ret;

#ifndef __NetBSD__		/* XXX redundant with drm_limit_dma_space? */
	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret)
		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));
#endif

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver,
#ifdef __NetBSD__
				  dev->bst,
				  dev->dmat,
#else
				  dev->anon_inode->i_mapping,
#endif
				  DRM_FILE_PAGE_OFFSET,
				  bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			      drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

#ifdef __NetBSD__
	pmap_pv_track(device->func->resource_addr(device, 1),
	    device->func->resource_size(device, 1));
#endif

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			      drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;

#ifdef __NetBSD__
	struct nvkm_device *device = nvxx_device(&drm->device);
	pmap_pv_untrack(device->func->resource_addr(device, 1),
	    device->func->resource_size(device, 1));
#endif
}