/* $NetBSD: amdgpu_fb.c,v 1.11 2021/12/20 20:34:58 chs Exp $ */

/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */

/*
 * amdgpu fbdev emulation: creates a kernel framebuffer (fbcon on Linux,
 * a genfb/amdgpufb child device on NetBSD) backed by a pinned VRAM buffer
 * object, and tears it down again on driver detach.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_fb.c,v 1.11 2021/12/20 20:34:58 chs Exp $");

#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>

#include "amdgpu.h"
#include "cikd.h"
#include "amdgpu_gem.h"

#include "amdgpu_display.h"

#ifdef __NetBSD__
#include "amdgpufb.h"
#endif

#include <linux/nbsd-namespace.h>

/* object hierarchy -
   this contains a helper + a amdgpu fb
   the helper contains a pointer to amdgpu framebuffer baseclass.
*/

#ifndef __NetBSD__

/*
 * fb_open hook (Linux-only path): take a runtime-PM reference on the DRM
 * device while the fbdev node is open so the GPU cannot autosuspend.
 * -EACCES from pm_runtime_get_sync is tolerated (runtime PM disabled).
 */
static int
amdgpufb_open(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;
	int ret = pm_runtime_get_sync(fb_helper->dev->dev);
	if (ret < 0 && ret != -EACCES) {
		/* get_sync failed hard: drop the reference it still took */
		pm_runtime_mark_last_busy(fb_helper->dev->dev);
		pm_runtime_put_autosuspend(fb_helper->dev->dev);
		return ret;
	}
	return 0;
}

/*
 * fb_release hook (Linux-only path): release the runtime-PM reference
 * acquired in amdgpufb_open, allowing autosuspend again.
 */
static int
amdgpufb_release(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	pm_runtime_mark_last_busy(fb_helper->dev->dev);
	pm_runtime_put_autosuspend(fb_helper->dev->dev);
	return 0;
}

/* fbdev ops: default DRM helpers plus the PM open/release hooks above. */
static const struct fb_ops amdgpufb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open = amdgpufb_open,
	.fb_release = amdgpufb_release,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
};

#endif


/*
 * Compute a scanline pitch in bytes for a framebuffer of `width` pixels
 * at `cpp` bytes per pixel, rounded up to the hardware's per-cpp pitch
 * alignment (256 pixels for 8bpp, 128 for 16bpp, 64 for 24/32bpp).
 * `tiled` is currently unused by this alignment computation.
 * A cpp outside 1..4 leaves pitch_mask at 0, i.e. no extra alignment.
 */
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	/* round the pixel width up to the alignment boundary */
	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

/*
 * Undo amdgpufb_create_pinned_object: unmap, unpin and drop the final GEM
 * reference on the fbcon buffer object. If the reserve fails we still drop
 * the GEM reference (the kunmap/unpin are skipped in that unlikely case).
 */
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
	int ret;

	ret = amdgpu_bo_reserve(abo, true);
	if (likely(ret == 0)) {
		amdgpu_bo_kunmap(abo);
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}
	drm_gem_object_put_unlocked(gobj);
}

/*
 * Allocate, pin and CPU-map the buffer object that backs the fbdev
 * framebuffer.
 *
 * Side effect: fills in mode_cmd->pitches[0] with the aligned pitch.
 * On success *gobj_p holds a referenced, pinned, kmapped GEM object;
 * on failure *gobj_p is NULL and a negative errno is returned.
 */
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
					 struct drm_mode_fb_cmd2 *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	const struct drm_format_info *info;
	struct amdgpu_device *adev = rfbdev->adev;
	struct drm_gem_object *gobj = NULL;
	struct amdgpu_bo *abo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0, domain;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	u32 cpp;
	/* CPU-visible, contiguous, pre-cleared VRAM; write-combined if in GTT */
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		    AMDGPU_GEM_CREATE_VRAM_CLEARED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	info = drm_get_format_info(adev->ddev, mode_cmd);
	cpp = info->cpp[0];

	/* need to align pitch with crtc limits */
	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
						  fb_tiled);
	domain = amdgpu_display_supported_domains(adev, flags);
	/* height rounded to 8 lines before sizing the allocation */
	height = ALIGN(mode_cmd->height, 8);
	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
				       ttm_bo_type_kernel, NULL, &gobj);
	if (ret) {
		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
		return -ENOMEM;
	}
	abo = gem_to_amdgpu_bo(gobj);

	if (fb_tiled)
		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);

	ret = amdgpu_bo_reserve(abo, false);
	if (unlikely(ret != 0))
		goto out_unref;

	if (tiling_flags) {
		/* tiling failure is non-fatal: log and continue untiled */
		ret = amdgpu_bo_set_tiling_flags(abo,
						 tiling_flags);
		if (ret)
			dev_err(adev->dev, "FB failed to set tiling flags\n");
	}

	/* pin so the scanout address stays fixed for the display engine */
	ret = amdgpu_bo_pin(abo, domain);
	if (ret) {
		amdgpu_bo_unreserve(abo);
		goto out_unref;
	}

	ret = amdgpu_ttm_alloc_gart(&abo->tbo);
	if (ret) {
		amdgpu_bo_unreserve(abo);
		dev_err(adev->dev, "%p bind failed\n", abo);
		goto out_unref;
	}

	/* establish the kernel CPU mapping used as the console backing */
	ret = amdgpu_bo_kmap(abo, NULL);
	amdgpu_bo_unreserve(abo);
	if (ret) {
		goto out_unref;
	}

	*gobj_p = gobj;
	return 0;
out_unref:
	/* safe for partial setup: destroy re-reserves before unpin/kunmap */
	amdgpufb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}

/*
 * drm_fb_helper fb_probe callback: build the fbdev framebuffer.
 * Allocates and pins the backing BO, initializes the DRM framebuffer,
 * then either attaches a NetBSD amdgpufb child device (via autoconf) or
 * fills in the Linux fb_info. Returns 0 on success, negative errno on
 * failure (with the BO and fb torn down on the error path).
 */
static int amdgpufb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	/* helper is the first member of amdgpu_fbdev, so this cast is valid */
	struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
	struct amdgpu_device *adev = rfbdev->adev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct amdgpu_bo *abo = NULL;
	int ret;
	unsigned long tmp;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* avoid unsupported 24bpp scanout; promote to 32bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon object %d\n", ret);
		return ret;
	}

	abo = gem_to_amdgpu_bo(gobj);

#ifndef __NetBSD__
	/* okay we have an object now allocate the framebuffer */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
#endif

	ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
					      &mode_cmd, gobj);
	if (ret) {
		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
		goto out;
	}

	fb = &rfbdev->rfb.base;

	/* setup helper */
	rfbdev->helper.fb = fb;

#ifdef __NetBSD__
    {
	/*
	 * NetBSD path: no fb_info; instead attach an amdgpufb child
	 * device through autoconf, handing it the CPU mapping and pitch.
	 */
	static const struct amdgpufb_attach_args zero_afa;
	struct amdgpufb_attach_args afa = zero_afa;

	/* tmp and info are only used on the Linux path; silence warnings */
	__USE(tmp);
	__USE(info);

	afa.afa_fb_helper = helper;
	afa.afa_fb_sizes = *sizes;
	afa.afa_fb_ptr = amdgpu_bo_kptr(abo);
	afa.afa_fb_linebytes = mode_cmd.pitches[0];

	/* autoconf attachment requires the kernel lock */
	KERNEL_LOCK(1, NULL);
	helper->fbdev = config_found(adev->ddev->dev, &afa, NULL,
	    CFARGS(.iattr = "amdgpufbbus"));
	KERNEL_UNLOCK_ONE(NULL);
	if (helper->fbdev == NULL) {
		DRM_ERROR("failed to attach amdgpufb\n");
		goto out;
	}
    }
#else /* __NetBSD__ */
	info->fbops = &amdgpufb_ops;

	/* physical aperture address of the BO for smem_start */
	tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
	info->fix.smem_start = adev->gmc.aper_base + tmp;
	info->fix.smem_len = amdgpu_bo_size(abo);
	info->screen_base = amdgpu_bo_kptr(abo);
	info->screen_size = amdgpu_bo_size(abo);

	drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = adev->gmc.aper_size;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out;
	}

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
	DRM_INFO("fb depth is %d\n", fb->format->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
#endif
	return 0;

out:
	if (abo) {
		/* intentionally empty (matches upstream); the BO is torn
		 * down below via the GEM reference when fb init failed */
	}
	if (fb && ret) {
		drm_gem_object_put_unlocked(gobj);
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
	return ret;
}

/*
 * Tear down the fbdev state: unregister the fb_info, destroy the pinned
 * backing object and the DRM framebuffer, then finalize the helper.
 * Always returns 0.
 */
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;

	drm_fb_helper_unregister_fbi(&rfbdev->helper);

	if (rfb->base.obj[0]) {
		amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
		rfb->base.obj[0] = NULL;
		drm_framebuffer_unregister_private(&rfb->base);
		drm_framebuffer_cleanup(&rfb->base);
	}
	drm_fb_helper_fini(&rfbdev->helper);

	return 0;
}

static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
	.fb_probe = amdgpufb_create,
};

/*
 * Driver-load entry point: allocate and register the fbdev emulation.
 * No-ops (returning 0) on headless hardware or when there are no
 * connectors. Returns negative errno only if helper init fails.
 */
int amdgpu_fbdev_init(struct amdgpu_device *adev)
{
	struct amdgpu_fbdev *rfbdev;
	int bpp_sel = 32;
	int ret;

	/* don't init fbdev on hw without DCE */
	if (!adev->mode_info.mode_config_initialized)
		return 0;

	/* don't init fbdev if there are no connectors */
	if (list_empty(&adev->ddev->mode_config.connector_list))
		return 0;

	/* select 8 bpp console on low vram cards */
	if (adev->gmc.real_vram_size <= (32*1024*1024))
		bpp_sel = 8;

	rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
	if (!rfbdev)
		return -ENOMEM;

	rfbdev->adev = adev;
	adev->mode_info.rfbdev = rfbdev;

	drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
			      &amdgpu_fb_helper_funcs);

	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
				 AMDGPUFB_CONN_LIMIT);
	if (ret) {
		kfree(rfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);

	/* disable all the possible outputs/crtcs before entering KMS mode */
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_disable_unused_functions(adev->ddev);

	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
	return 0;
}

/*
 * Driver-unload counterpart of amdgpu_fbdev_init: destroy and free the
 * fbdev state. Safe to call when init was skipped (rfbdev == NULL).
 */
void amdgpu_fbdev_fini(struct amdgpu_device *adev)
{
	if (!adev->mode_info.rfbdev)
		return;

	amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
	kfree(adev->mode_info.rfbdev);
	adev->mode_info.rfbdev = NULL;
}

/*
 * Suspend/resume hook: forward the suspend state to the fb helper.
 * `state` follows the drm_fb_helper_set_suspend convention (nonzero =
 * suspend). No-op when fbdev emulation was never initialized.
 */
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
	if (adev->mode_info.rfbdev)
		drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
						   state);
}

/*
 * Return the total size in bytes of the fbdev backing object, or 0 when
 * fbdev emulation is not initialized. Used for VRAM accounting.
 */
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
{
	struct amdgpu_bo *robj;
	int size = 0;

	if (!adev->mode_info.rfbdev)
		return 0;

	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
	size += amdgpu_bo_size(robj);
	return size;
}

/*
 * Test whether `robj` is the buffer object backing the fbdev console.
 * False when fbdev emulation is not initialized.
 */
bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
	if (!adev->mode_info.rfbdev)
		return false;
	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
		return true;
	return false;
}