/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "ast_drv.h"
#include <drm/ttm/ttm_page_alloc.h>

static inline struct ast_private *
ast_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct ast_private, ttm.bdev);
}

static int
ast_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
ast_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

/*
 * Take references on the global TTM memory-accounting and BO objects,
 * initialising them on first use.
 */
static int ast_ttm_global_init(struct ast_private *ast)
{
	struct drm_global_reference *global_ref;
	int r;

	global_ref = &ast->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &ast_ttm_mem_global_init;
	global_ref->release = &ast_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	ast->ttm.bo_global_ref.mem_glob = ast->ttm.mem_global_ref.object;
	global_ref = &ast->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&ast->ttm.mem_global_ref);
		return r;
	}
	return 0;
}

void
ast_ttm_global_release(struct ast_private *ast)
{
	if (ast->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
	drm_global_item_unref(&ast->ttm.mem_global_ref);
	ast->ttm.mem_global_ref.release = NULL;
}

static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct ast_bo *bo = container_of(tbo, struct ast_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &ast_bo_ttm_destroy;
}

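/*
 * Describe the memory domains to TTM: system RAM supports the full set
 * of caching modes, while VRAM is a fixed, CPU-mappable PCI aperture
 * handled by the generic range manager and write-combined by default.
 */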
static int
ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct ast_bo *astbo = ast_bo(bo);

	/* Only our own BOs are evicted; move them to system memory. */
	if (!ast_ttm_bo_is_ast_bo(bo))
		return;

	ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
	*pl = astbo->placement;
}

static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct ast_private *ast = ast_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int ast_bo_move(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	/* No acceleration available: always move with a CPU memcpy. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}

static void ast_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func ast_tt_backend_func = {
	.destroy = &ast_ttm_backend_destroy,
};

static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
	if (tt == NULL)
		return NULL;
	tt->func = &ast_tt_backend_func;
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}

static int ast_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver ast_bo_driver = {
	.ttm_tt_create = ast_ttm_tt_create,
	.ttm_tt_populate = ast_ttm_tt_populate,
	.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
	.init_mem_type = ast_bo_init_mem_type,
	.evict_flags = ast_bo_evict_flags,
	.move = ast_bo_move,
	.verify_access = ast_bo_verify_access,
	.io_mem_reserve = &ast_ttm_io_mem_reserve,
	.io_mem_free = &ast_ttm_io_mem_free,
};

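/*
 * Bring up the memory manager: take the TTM global references,
 * initialise the BO device, register the whole of VRAM with TTM, and
 * request a write-combining MTRR over the framebuffer BAR.
 */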
int ast_mm_init(struct ast_private *ast)
{
	int ret;
	struct drm_device *dev = ast->dev;
	struct ttm_bo_device *bdev = &ast->ttm.bdev;

	ret = ast_ttm_global_init(ast);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(bdev,
				 ast->ttm.bo_global_ref.ref.object,
				 &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver: %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     ast->vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		return ret;
	}

	ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
				    pci_resource_len(dev->pdev, 0),
				    DRM_MTRR_WC);

	return 0;
}

void ast_mm_fini(struct ast_private *ast)
{
	struct drm_device *dev = ast->dev;

	ttm_bo_device_release(&ast->ttm.bdev);

	ast_ttm_global_release(ast);

	if (ast->fb_mtrr >= 0) {
		drm_mtrr_del(ast->fb_mtrr,
			     pci_resource_start(dev->pdev, 0),
			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
		ast->fb_mtrr = -1;
	}
}

void ast_ttm_placement(struct ast_bo *bo, int domain)
{
	u32 c = 0;

	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				      TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	/* Fall back to system memory if no domain was requested. */
	if (!c)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;
}

int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
{
	int ret;

	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("reserve failed %p\n", bo);
		return ret;
	}
	return 0;
}

void ast_bo_unreserve(struct ast_bo *bo)
{
	ttm_bo_unreserve(&bo->bo);
}

int ast_bo_create(struct drm_device *dev, int size, int align,
		  uint32_t flags, struct ast_bo **pastbo)
{
	struct ast_private *ast = dev->dev_private;
	struct ast_bo *astbo;
	size_t acc_size;
	int ret;

	astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
	if (!astbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &astbo->gem, size);
	if (ret) {
		kfree(astbo);
		return ret;
	}

	astbo->gem.driver_private = NULL;
	astbo->bo.bdev = &ast->ttm.bdev;

	ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
				       sizeof(struct ast_bo));

	/* On failure ttm_bo_init() calls ast_bo_ttm_destroy(), which
	 * frees astbo, so no cleanup is needed here. */
	ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
			  ttm_bo_type_device, &astbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, ast_bo_ttm_destroy);
	if (ret)
		return ret;

	*pastbo = astbo;
	return 0;
}

static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
{
	return bo->bo.offset;
}

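/*
 * Pinning is reference counted: only the first pin validates the BO
 * into the requested placement with TTM_PL_FLAG_NO_EVICT set. Typical
 * usage from modesetting code (a sketch, error handling omitted):
 *
 *	struct ast_bo *bo;
 *	u64 gpu_addr;
 *
 *	ast_bo_create(dev, size, 0, 0, &bo);
 *	ast_bo_reserve(bo, false);
 *	ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
 *	ast_bo_unreserve(bo);
 */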
int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = ast_bo_gpu_offset(bo);
		return 0;
	}

	ast_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = ast_bo_gpu_offset(bo);
	return 0;
}

int ast_bo_unpin(struct ast_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	return 0;
}

/*
 * Drop the last pin reference and force the BO back into system
 * memory, unmapping any kernel mapping first.
 */
int ast_bo_push_sysram(struct ast_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret) {
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}
	return 0;
}

int ast_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct ast_private *ast;

	/* Offsets below DRM_FILE_PAGE_OFFSET belong to legacy DRM maps. */
	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	file_priv = filp->private_data;
	ast = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
}