/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "ast_drv.h"
#include <ttm/ttm_page_alloc.h>

static inline struct ast_private *
ast_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct ast_private, ttm.bdev);
}

static int
ast_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
ast_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int ast_ttm_global_init(struct ast_private *ast)
{
	struct drm_global_reference *global_ref;
	int r;

	global_ref = &ast->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &ast_ttm_mem_global_init;
	global_ref->release = &ast_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	ast->ttm.bo_global_ref.mem_glob = ast->ttm.mem_global_ref.object;
	global_ref = &ast->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&ast->ttm.mem_global_ref);
		return r;
	}
	return 0;
}

static void
ast_ttm_global_release(struct ast_private *ast)
{
	if (ast->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
	drm_global_item_unref(&ast->ttm.mem_global_ref);
	ast->ttm.mem_global_ref.release = NULL;
}

static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct ast_bo *bo;

	bo = container_of(tbo, struct ast_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &ast_bo_ttm_destroy;
}
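
/*
 * TTM memory types: plain system RAM is fully cacheable, while VRAM is a
 * fixed, mappable aperture behind PCI BAR 0 that is carved up by the
 * generic range manager and mapped write-combined by default.
 */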
static int
ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct ast_bo *astbo = ast_bo(bo);

	if (!ast_ttm_bo_is_ast_bo(bo))
		return;

	ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
	*pl = astbo->placement;
}

static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ast_bo *astbo = ast_bo(bo);

	return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
}

static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct ast_private *ast = ast_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int ast_bo_move(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	/* No DMA engine is used; all moves are done by CPU memcpy. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}

static void ast_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func ast_tt_backend_func = {
	.destroy = &ast_ttm_backend_destroy,
};

static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
	if (tt == NULL)
		return NULL;
	tt->func = &ast_tt_backend_func;
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}

static int ast_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver ast_bo_driver = {
	.ttm_tt_create = ast_ttm_tt_create,
	.ttm_tt_populate = ast_ttm_tt_populate,
	.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
	.init_mem_type = ast_bo_init_mem_type,
	.evict_flags = ast_bo_evict_flags,
	.move = ast_bo_move,
	.verify_access = ast_bo_verify_access,
	.io_mem_reserve = &ast_ttm_io_mem_reserve,
	.io_mem_free = &ast_ttm_io_mem_free,
};
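
/*
 * ast_mm_init()/ast_mm_fini() bracket the lifetime of the memory manager:
 * the global TTM state, the per-device BO device, a VRAM zone sized from
 * the amount of video memory probed at driver load, and a write-combining
 * MTRR (on platforms that still need one) covering the framebuffer BAR.
 */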
int ast_mm_init(struct ast_private *ast)
{
	int ret;
	struct drm_device *dev = ast->dev;
	struct ttm_bo_device *bdev = &ast->ttm.bdev;

	ret = ast_ttm_global_init(ast);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&ast->ttm.bdev,
				 ast->ttm.bo_global_ref.ref.object,
				 &ast_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver: %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     ast->vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		return ret;
	}

	ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
					pci_resource_len(dev->pdev, 0));

	return 0;
}

void ast_mm_fini(struct ast_private *ast)
{
	ttm_bo_device_release(&ast->ttm.bdev);

	ast_ttm_global_release(ast);

	arch_phys_wc_del(ast->fb_mtrr);
}

void ast_ttm_placement(struct ast_bo *bo, int domain)
{
	u32 c = 0;

	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				      TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;
}

int ast_bo_create(struct drm_device *dev, int size, int align,
		  uint32_t flags, struct ast_bo **pastbo)
{
	struct ast_private *ast = dev->dev_private;
	struct ast_bo *astbo;
	size_t acc_size;
	int ret;

	astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
	if (!astbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &astbo->gem, size);
	if (ret) {
		kfree(astbo);
		return ret;
	}

	astbo->bo.bdev = &ast->ttm.bdev;

	ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
				       sizeof(struct ast_bo));

	/* On failure, ttm_bo_init() invokes the ast_bo_ttm_destroy()
	 * callback, which frees astbo; nothing further to unwind here. */
	ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
			  ttm_bo_type_device, &astbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, ast_bo_ttm_destroy);
	if (ret)
		return ret;

	*pastbo = astbo;
	return 0;
}

static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
{
	return bo->bo.offset;
}

int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = ast_bo_gpu_offset(bo);
		return 0;
	}

	ast_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = ast_bo_gpu_offset(bo);
	return 0;
}

int ast_bo_unpin(struct ast_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin of unpinned buffer %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	return 0;
}
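
/*
 * Drop the final pin reference and migrate the buffer back to system
 * memory so that its VRAM can be reused, e.g. once the framebuffer it
 * backs is no longer being scanned out.
 */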
int ast_bo_push_sysram(struct ast_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin of unpinned buffer %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret) {
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}
	return 0;
}

int ast_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct ast_private *ast;

	/* Offsets below DRM_FILE_PAGE_OFFSET are legacy DRM maps rather
	 * than TTM objects; hand those to the core mmap path. */
	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	file_priv = filp->private_data;
	ast = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
}
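
/*
 * Illustrative usage (a sketch, not driver code; error handling omitted,
 * and the buffer must be reserved around pin/unpin as TTM requires):
 *
 *	struct ast_bo *bo;
 *	u64 gpu_addr;
 *
 *	ast_bo_create(dev, size, PAGE_SIZE, 0, &bo);
 *	ttm_bo_reserve(&bo->bo, true, false, false, NULL);
 *	ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
 *	ttm_bo_unreserve(&bo->bo);
 */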