/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "soc15_common.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_vblank.h>

/**
 * amdgpu_display_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector.  After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * This design approach is required in order to defer hotplug event handling
 * from the IRQ handler to a work handler because the hotplug handler has to
 * use mutexes which cannot be locked in an IRQ handler (since &mutex_lock
 * may sleep).
 */
void amdgpu_display_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work.work);
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	mutex_lock(&mode_config->mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		amdgpu_connector_hotplug(connector);
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj);

static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}
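/*
 * amdgpu_display_flip_work_func - deferred page flip handler
 *
 * Waits for the fences attached to the new buffer (rescheduling itself as
 * a fence callback while any remain), delays the flip until the CRTC has
 * left the vertical blank period preceding the target vblank, then
 * programs the flip through the mmio page_flip hook while holding the
 * event spinlock.
 */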
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
		    amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(work->old_abo);
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}
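/*
 * amdgpu_display_crtc_page_flip_target - queue a page flip for a target vblank
 *
 * Pins the new buffer, collects its write fences and hands the flip off to
 * amdgpu_display_flip_work_func(); the old buffer is unpinned later from
 * amdgpu_display_unpin_work_func().  Returns -EBUSY if a flip is already
 * pending on the CRTC.
 */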
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 struct drm_pending_vblank_event *event,
					 uint32_t page_flip_flags, uint32_t target,
					 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
				&work->shared_count,
				&work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}
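/*
 * amdgpu_display_crtc_set_config - wrapper around drm_crtc_helper_set_config
 *
 * Besides performing the modeset, this keeps a runtime PM reference on the
 * device while any CRTC is active and drops it again once all CRTCs have
 * been disabled.
 */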
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref)
		adev->have_disp_power_ref = false;
out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

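/*
 * amdgpu_display_print_display_setup - dump the connector/encoder topology
 *
 * Logs each connector together with its HPD pin, DDC register set,
 * optional DDC and clock/data routers, and the encoders wired to it.
 * Purely informational.
 */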
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}

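/*
 * amdgpu_display_ddc_probe - sanity check for a display on a DDC bus
 *
 * Performs a dummy 8-byte EDID read over either the native DDC adapter or
 * the DP AUX channel and checks the reply for a plausible EDID header.
 */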
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_block_valid() can fix the last 2 bytes
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector
		 */
		return false;
	}
	return true;
}

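/*
 * amdgpu_dirtyfb - dirty callback for the atomic framebuffer funcs
 *
 * Userspace flushes (a non-NULL file) are rejected; kernel-internal
 * callers fall through to drm_atomic_helper_dirtyfb().
 */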
static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
			  unsigned int flags, unsigned int color,
			  struct drm_clip_rect *clips, unsigned int num_clips)
{
	if (file)
		return -ENOSYS;

	return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
					 num_clips);
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
	.dirty = amdgpu_dirtyfb
};

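/*
 * amdgpu_display_supported_domains - memory domains usable for scanout
 *
 * Always allows VRAM, and additionally GTT when the BO carries the USWC
 * flag and the DC/ASIC configuration supports scanning out of system
 * memory.
 */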
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * if amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board. But this mapping is required
	 * to avoid hang caused by placement of scanout BO in GTT on certain
	 * APUs. So force the BO placement to VRAM in case this architecture
	 * will not allow USWC mappings.
	 * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    adev->dc_enabled &&
	    adev->mode_info.gpu_vm_support)
		domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif

	return domain;
}

static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return NULL;

	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
		return lookup_format_info(dcc_retile_formats,
					  ARRAY_SIZE(dcc_retile_formats),
					  format);

	if (AMD_FMT_MOD_GET(DCC, modifier))
		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
					  format);

	/* returning NULL will cause the default format structs to be used. */
	return NULL;
}

/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* Something that fits a descriptor + header. */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at least
	 * the version + pci vendor+device id + 8 words for a descriptor.
	 */
	if (size < 40 || metadata[0] != 1)
		return -EINVAL;

	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* resource word 6/7 META_DATA_ADDRESS{_LO} */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* resource word 5/7 META_DATA_ADDRESS */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}

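/*
 * convert_tiling_flags_to_modifier - derive a format modifier from tiling flags
 *
 * Pre-modifier userspace only supplies AMDGPU_TILING_* flags, so reconstruct
 * an equivalent AMD_FMT_MOD modifier (swizzle mode, tile version, XOR bits
 * and DCC layout) from those flags and the ASIC configuration.
 */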
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;
	int num_pipes = 0;
	int num_pkrs = 0;

	num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
	num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes;

	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(num_pipes);
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64KiB _T */
		case 6: /* 64KiB _X */
			block_size_bits = 16;
			break;
		case 7: /* 256KiB */
			block_size_bits = 18;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX11;
		else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		switch (swizzle & 3) {
		case 0: /* Z microtiling */
			return -EINVAL;
		case 1: /* S microtiling */
			if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {
				if (!has_xor)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 2:
			if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {
				if (!has_xor && afb->base.format->cpp[0] != 4)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 3:
			break;
		}
		if (has_xor) {
			if (num_pipes == num_pkrs && num_pkrs == 0) {
				DRM_ERROR("invalid number of pipes and packers\n");
				return -EINVAL;
			}

			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX11:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* Enable constant encode on RAVEN2 and later. */
			bool dcc_constant_encode = (adev->asic_type > CHIP_RAVEN ||
						    (adev->asic_type == CHIP_RAVEN &&
						     adev->external_rev_id >= 0x81)) &&
						   adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;
			/*
			 * If the userspace driver uses retiling the tiling flags do not contain
			 * info on the renderable DCC buffer. Luckily the opaque metadata contains
			 * the info so we can try to extract it. The kernel does not use this info
			 * but we should convert it to a modifier plane for getfb2, so the
			 * userspace driver that gets it doesn't have to juggle around another DCC
			 * plane internally.
			 */
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits;  /* of base surface data */

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}
			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}

/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
	u64 micro_tile_mode;

	if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
		return 0;

	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
	switch (micro_tile_mode) {
	case 0: /* DISPLAY */
	case 3: /* RENDER */
		return 0;
	default:
		drm_dbg_kms(afb->base.dev,
			    "Micro tile mode %llu not supported for scanout\n",
			    micro_tile_mode);
		return -EINVAL;
	}
}

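/*
 * get_block_dimensions - width/height of a tile block in pixels
 *
 * Splits the log2 block size in bytes, minus the log2 bytes per pixel,
 * into a roughly square width/height pair, giving the extra bit to the
 * width when the pixel-count exponent is odd.
 */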
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int cpp_log2 = ilog2(cpp);
	unsigned int pixel_log2 = block_log2 - cpp_log2;
	unsigned int width_log2 = (pixel_log2 + 1) / 2;
	unsigned int height_log2 = pixel_log2 - width_log2;

	*width = 1 << width_log2;
	*height = 1 << height_log2;
}

static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
				       bool pipe_aligned)
{
	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

	switch (ver) {
	case AMD_FMT_MOD_TILE_VER_GFX9: {
		/*
		 * TODO: for pipe aligned we may need to check the alignment of the
		 * total size of the surface, which may need to be bigger than the
		 * natural alignment due to some HW workarounds
		 */
		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
	}
	case AMD_FMT_MOD_TILE_VER_GFX10:
	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
	case AMD_FMT_MOD_TILE_VER_GFX11: {
		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

		if (ver >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
			++pipes_log2;

		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
	}
	default:
		return 0;
	}
}

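/*
 * amdgpu_display_verify_plane - validate pitch, offset and size of one plane
 *
 * Checks that the plane's pitch is block-aligned and at least the minimum
 * for its width, that its offset is block-aligned, and that the backing BO
 * is large enough to hold the plane.
 */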
static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
				       const struct drm_format_info *format,
				       unsigned int block_width, unsigned int block_height,
				       unsigned int block_size_log2)
{
	unsigned int width = rfb->base.width /
		((plane && plane < format->num_planes) ? format->hsub : 1);
	unsigned int height = rfb->base.height /
		((plane && plane < format->num_planes) ? format->vsub : 1);
	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
	unsigned int block_pitch = block_width * cpp;
	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
	unsigned int block_size = 1 << block_size_log2;
	uint64_t size;

	if (rfb->base.pitches[plane] % block_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
			    rfb->base.pitches[plane], plane, block_pitch);
		return -EINVAL;
	}
	if (rfb->base.pitches[plane] < min_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is less than minimum pitch %d\n",
			    rfb->base.pitches[plane], plane, min_pitch);
		return -EINVAL;
	}

	/* Force at least natural alignment. */
	if (rfb->base.offsets[plane] % block_size) {
		drm_dbg_kms(rfb->base.dev,
			    "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
			    rfb->base.offsets[plane], plane, block_size);
		return -EINVAL;
	}

	size = rfb->base.offsets[plane] +
		(uint64_t)rfb->base.pitches[plane] / block_pitch *
		block_size * DIV_ROUND_UP(height, block_height);

	if (rfb->base.obj[0]->size < size) {
		drm_dbg_kms(rfb->base.dev,
			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
			    rfb->base.obj[0]->size, size, plane);
		return -EINVAL;
	}

	return 0;
}

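/*
 * amdgpu_display_verify_sizes - validate all planes of a framebuffer
 *
 * Derives the tile block dimensions of each plane from the modifier
 * (including the extra DCC planes on pre-GFX12 tile versions) and runs
 * amdgpu_display_verify_plane() on each of them.
 */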
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch (swizzle) {
			case AMD_FMT_MOD_TILE_GFX12_256B_2D:
				block_size_log2 = 8;
				break;
			case AMD_FMT_MOD_TILE_GFX12_4K_2D:
				block_size_log2 = 12;
				break;
			case AMD_FMT_MOD_TILE_GFX12_64K_2D:
				block_size_log2 = 16;
				break;
			case AMD_FMT_MOD_TILE_GFX12_256K_2D:
				block_size_log2 = 18;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Gfx12 swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		} else {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			case DC_SW_VAR_S_X:
				block_size_log2 = 18;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 &&
	    AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}

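/*
 * amdgpu_display_get_fb_info - fetch tiling flags and TMZ state of an fb's BO
 *
 * Reserves the backing BO just long enough to read its tiling flags and
 * whether it is an encrypted (TMZ) surface.
 */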
static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
				      uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		return 0;
	}

	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
						 struct amdgpu_framebuffer *rfb,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd,
						 struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	/* Verify that the modifier is supported. */
	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(dev,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);

		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
	if (ret)
		goto err;

	if (drm_drv_uses_atomic_modeset(dev))
		ret = drm_framebuffer_init(dev, &rfb->base,
					   &amdgpu_fb_funcs_atomic);
	else
		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);

	if (ret)
		goto err;

	return 0;
err:
	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
	rfb->base.obj[0] = NULL;
	return ret;
}

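/*
 * amdgpu_display_framebuffer_init - finish initializing an amdgpu framebuffer
 *
 * Requires all planes to share one BO, reads the BO's tiling flags and TMZ
 * state, converts legacy tiling flags into a format modifier where needed,
 * and verifies the resulting plane sizes.
 */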
%u\n", 12145ca02815Sjsg i, mode_cmd->handles[0], mode_cmd->handles[i]); 12155ca02815Sjsg ret = -EINVAL; 12165ca02815Sjsg return ret; 12175ca02815Sjsg } 12185ca02815Sjsg } 12195ca02815Sjsg 12205ca02815Sjsg ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface); 12215ca02815Sjsg if (ret) 12225ca02815Sjsg return ret; 12235ca02815Sjsg 12241bb76ff1Sjsg if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) { 12255ca02815Sjsg drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, 12265ca02815Sjsg "GFX9+ requires FB check based on format modifier\n"); 12275ca02815Sjsg ret = check_tiling_flags_gfx6(rfb); 12285ca02815Sjsg if (ret) 12295ca02815Sjsg return ret; 12305ca02815Sjsg } 12315ca02815Sjsg 12321bb76ff1Sjsg if (!dev->mode_config.fb_modifiers_not_supported && 12335ca02815Sjsg !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) { 12345ca02815Sjsg ret = convert_tiling_flags_to_modifier(rfb); 12355ca02815Sjsg if (ret) { 12365ca02815Sjsg drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier", 12375ca02815Sjsg rfb->tiling_flags); 12385ca02815Sjsg return ret; 12395ca02815Sjsg } 12405ca02815Sjsg } 12415ca02815Sjsg 12425ca02815Sjsg ret = amdgpu_display_verify_sizes(rfb); 12435ca02815Sjsg if (ret) 12445ca02815Sjsg return ret; 12455ca02815Sjsg 12465ca02815Sjsg for (i = 0; i < rfb->base.format->num_planes; ++i) { 12475ca02815Sjsg drm_gem_object_get(rfb->base.obj[0]); 12485ca02815Sjsg rfb->base.obj[i] = rfb->base.obj[0]; 12495ca02815Sjsg } 12505ca02815Sjsg 1251fb4d8502Sjsg return 0; 1252fb4d8502Sjsg } 1253fb4d8502Sjsg 1254fb4d8502Sjsg struct drm_framebuffer * 1255fb4d8502Sjsg amdgpu_display_user_framebuffer_create(struct drm_device *dev, 1256fb4d8502Sjsg struct drm_file *file_priv, 1257fb4d8502Sjsg const struct drm_mode_fb_cmd2 *mode_cmd) 1258fb4d8502Sjsg { 1259fb4d8502Sjsg struct amdgpu_framebuffer *amdgpu_fb; 12605ca02815Sjsg struct drm_gem_object *obj; 12615ca02815Sjsg struct amdgpu_bo *bo; 12625ca02815Sjsg uint32_t domains; 1263fb4d8502Sjsg int ret; 1264fb4d8502Sjsg 1265fb4d8502Sjsg obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); 1266fb4d8502Sjsg if (obj == NULL) { 126709f85244Sjsg drm_dbg_kms(dev, 126809f85244Sjsg "No GEM object associated to handle 0x%08X, can't create framebuffer\n", 126909f85244Sjsg mode_cmd->handles[0]); 127009f85244Sjsg 1271fb4d8502Sjsg return ERR_PTR(-ENOENT); 1272fb4d8502Sjsg } 1273fb4d8502Sjsg 1274fb4d8502Sjsg /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ 12755ca02815Sjsg bo = gem_to_amdgpu_bo(obj); 12765ca02815Sjsg domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags); 12775ca02815Sjsg if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { 12785ca02815Sjsg drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n"); 12795ca02815Sjsg drm_gem_object_put(obj); 1280fb4d8502Sjsg return ERR_PTR(-EINVAL); 1281fb4d8502Sjsg } 1282fb4d8502Sjsg 1283fb4d8502Sjsg amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); 1284fb4d8502Sjsg if (amdgpu_fb == NULL) { 1285ad8b1aafSjsg drm_gem_object_put(obj); 1286fb4d8502Sjsg return ERR_PTR(-ENOMEM); 1287fb4d8502Sjsg } 1288fb4d8502Sjsg 12895ca02815Sjsg ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv, 12905ca02815Sjsg mode_cmd, obj); 1291fb4d8502Sjsg if (ret) { 1292fb4d8502Sjsg kfree(amdgpu_fb); 1293ad8b1aafSjsg drm_gem_object_put(obj); 1294fb4d8502Sjsg return ERR_PTR(ret); 1295fb4d8502Sjsg } 1296fb4d8502Sjsg 12975ca02815Sjsg drm_gem_object_put(obj); 1298fb4d8502Sjsg return &amdgpu_fb->base; 
1299fb4d8502Sjsg } 1300fb4d8502Sjsg 1301fb4d8502Sjsg const struct drm_mode_config_funcs amdgpu_mode_funcs = { 1302fb4d8502Sjsg .fb_create = amdgpu_display_user_framebuffer_create, 1303fb4d8502Sjsg }; 1304fb4d8502Sjsg 1305f005ef32Sjsg static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = { 1306f005ef32Sjsg { UNDERSCAN_OFF, "off" }, 1307fb4d8502Sjsg { UNDERSCAN_ON, "on" }, 1308fb4d8502Sjsg { UNDERSCAN_AUTO, "auto" }, 1309fb4d8502Sjsg }; 1310fb4d8502Sjsg 1311f005ef32Sjsg static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = { 1312f005ef32Sjsg { AMDGPU_AUDIO_DISABLE, "off" }, 1313fb4d8502Sjsg { AMDGPU_AUDIO_ENABLE, "on" }, 1314fb4d8502Sjsg { AMDGPU_AUDIO_AUTO, "auto" }, 1315fb4d8502Sjsg }; 1316fb4d8502Sjsg 1317fb4d8502Sjsg /* XXX support different dither options? spatial, temporal, both, etc. */ 1318f005ef32Sjsg static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = { 1319f005ef32Sjsg { AMDGPU_FMT_DITHER_DISABLE, "off" }, 1320fb4d8502Sjsg { AMDGPU_FMT_DITHER_ENABLE, "on" }, 1321fb4d8502Sjsg }; 1322fb4d8502Sjsg 1323fb4d8502Sjsg int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) 1324fb4d8502Sjsg { 1325fb4d8502Sjsg int sz; 1326fb4d8502Sjsg 1327fb4d8502Sjsg adev->mode_info.coherent_mode_property = 1328ad8b1aafSjsg drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1); 1329fb4d8502Sjsg if (!adev->mode_info.coherent_mode_property) 1330fb4d8502Sjsg return -ENOMEM; 1331fb4d8502Sjsg 1332fb4d8502Sjsg adev->mode_info.load_detect_property = 1333ad8b1aafSjsg drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1); 1334fb4d8502Sjsg if (!adev->mode_info.load_detect_property) 1335fb4d8502Sjsg return -ENOMEM; 1336fb4d8502Sjsg 1337ad8b1aafSjsg drm_mode_create_scaling_mode_property(adev_to_drm(adev)); 1338fb4d8502Sjsg 1339fb4d8502Sjsg sz = ARRAY_SIZE(amdgpu_underscan_enum_list); 1340fb4d8502Sjsg adev->mode_info.underscan_property = 1341ad8b1aafSjsg drm_property_create_enum(adev_to_drm(adev), 0, 1342fb4d8502Sjsg "underscan", 1343fb4d8502Sjsg amdgpu_underscan_enum_list, sz); 1344fb4d8502Sjsg 1345fb4d8502Sjsg adev->mode_info.underscan_hborder_property = 1346ad8b1aafSjsg drm_property_create_range(adev_to_drm(adev), 0, 1347fb4d8502Sjsg "underscan hborder", 0, 128); 1348fb4d8502Sjsg if (!adev->mode_info.underscan_hborder_property) 1349fb4d8502Sjsg return -ENOMEM; 1350fb4d8502Sjsg 1351fb4d8502Sjsg adev->mode_info.underscan_vborder_property = 1352ad8b1aafSjsg drm_property_create_range(adev_to_drm(adev), 0, 1353fb4d8502Sjsg "underscan vborder", 0, 128); 1354fb4d8502Sjsg if (!adev->mode_info.underscan_vborder_property) 1355fb4d8502Sjsg return -ENOMEM; 1356fb4d8502Sjsg 1357fb4d8502Sjsg sz = ARRAY_SIZE(amdgpu_audio_enum_list); 1358fb4d8502Sjsg adev->mode_info.audio_property = 1359ad8b1aafSjsg drm_property_create_enum(adev_to_drm(adev), 0, 1360fb4d8502Sjsg "audio", 1361fb4d8502Sjsg amdgpu_audio_enum_list, sz); 1362fb4d8502Sjsg 1363fb4d8502Sjsg sz = ARRAY_SIZE(amdgpu_dither_enum_list); 1364fb4d8502Sjsg adev->mode_info.dither_property = 1365ad8b1aafSjsg drm_property_create_enum(adev_to_drm(adev), 0, 1366fb4d8502Sjsg "dither", 1367fb4d8502Sjsg amdgpu_dither_enum_list, sz); 1368fb4d8502Sjsg 1369f005ef32Sjsg if (adev->dc_enabled) { 1370c349dbc7Sjsg adev->mode_info.abm_level_property = 1371ad8b1aafSjsg drm_property_create_range(adev_to_drm(adev), 0, 1372c349dbc7Sjsg "abm level", 0, 4); 1373c349dbc7Sjsg if (!adev->mode_info.abm_level_property) 1374fb4d8502Sjsg return -ENOMEM; 1375fb4d8502Sjsg } 1376fb4d8502Sjsg 1377fb4d8502Sjsg return 0; 
1378fb4d8502Sjsg }
1379fb4d8502Sjsg 
1380fb4d8502Sjsg void amdgpu_display_update_priority(struct amdgpu_device *adev)
1381fb4d8502Sjsg {
1382fb4d8502Sjsg /* adjustment options for the display watermarks */
1383fb4d8502Sjsg if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
1384fb4d8502Sjsg adev->mode_info.disp_priority = 0;
1385fb4d8502Sjsg else
1386fb4d8502Sjsg adev->mode_info.disp_priority = amdgpu_disp_priority;
1388fb4d8502Sjsg }
1389fb4d8502Sjsg 
1390fb4d8502Sjsg static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
1391fb4d8502Sjsg {
1392fb4d8502Sjsg /* try to guess if this is a TV or a monitor */
1393fb4d8502Sjsg if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
1394fb4d8502Sjsg (mode->vdisplay == 576) || /* 576p */
1395fb4d8502Sjsg (mode->vdisplay == 720) || /* 720p */
1396fb4d8502Sjsg (mode->vdisplay == 1080)) /* 1080p */
1397fb4d8502Sjsg return true;
1398fb4d8502Sjsg else
1399fb4d8502Sjsg return false;
1400fb4d8502Sjsg }
1401fb4d8502Sjsg 
1402fb4d8502Sjsg bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1403fb4d8502Sjsg const struct drm_display_mode *mode,
1404fb4d8502Sjsg struct drm_display_mode *adjusted_mode)
1405fb4d8502Sjsg {
1406fb4d8502Sjsg struct drm_device *dev = crtc->dev;
1407fb4d8502Sjsg struct drm_encoder *encoder;
1408fb4d8502Sjsg struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1409fb4d8502Sjsg struct amdgpu_encoder *amdgpu_encoder;
1410fb4d8502Sjsg struct drm_connector *connector;
1411fb4d8502Sjsg u32 src_v = 1, dst_v = 1;
1412fb4d8502Sjsg u32 src_h = 1, dst_h = 1;
1413fb4d8502Sjsg 
1414fb4d8502Sjsg amdgpu_crtc->h_border = 0;
1415fb4d8502Sjsg amdgpu_crtc->v_border = 0;
1416fb4d8502Sjsg 
1417fb4d8502Sjsg list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1418fb4d8502Sjsg if (encoder->crtc != crtc)
1419fb4d8502Sjsg continue;
1420fb4d8502Sjsg amdgpu_encoder = to_amdgpu_encoder(encoder);
1421fb4d8502Sjsg connector = amdgpu_get_connector_for_encoder(encoder);
1422fb4d8502Sjsg 
1423fb4d8502Sjsg /* set scaling */
1424fb4d8502Sjsg if (amdgpu_encoder->rmx_type == RMX_OFF)
1425fb4d8502Sjsg amdgpu_crtc->rmx_type = RMX_OFF;
1426fb4d8502Sjsg else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
1427fb4d8502Sjsg mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
1428fb4d8502Sjsg amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
1429fb4d8502Sjsg else
1430fb4d8502Sjsg amdgpu_crtc->rmx_type = RMX_OFF;
1431fb4d8502Sjsg /* copy native mode */
1432fb4d8502Sjsg memcpy(&amdgpu_crtc->native_mode,
1433fb4d8502Sjsg &amdgpu_encoder->native_mode,
1434fb4d8502Sjsg sizeof(struct drm_display_mode));
1435fb4d8502Sjsg src_v = crtc->mode.vdisplay;
1436fb4d8502Sjsg dst_v = amdgpu_crtc->native_mode.vdisplay;
1437fb4d8502Sjsg src_h = crtc->mode.hdisplay;
1438fb4d8502Sjsg dst_h = amdgpu_crtc->native_mode.hdisplay;
1439fb4d8502Sjsg 
1440fb4d8502Sjsg /* fix up for overscan on HDMI */
1441fb4d8502Sjsg if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
1442fb4d8502Sjsg ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
1443fb4d8502Sjsg ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
14441bb76ff1Sjsg connector->display_info.is_hdmi &&
1445fb4d8502Sjsg amdgpu_display_is_hdtv_mode(mode)))) {
1446fb4d8502Sjsg if (amdgpu_encoder->underscan_hborder != 0)
1447fb4d8502Sjsg amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
1448fb4d8502Sjsg else
1449fb4d8502Sjsg amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16; /* 1/32 of width + 16, e.g. 76 at 1920 */
1450fb4d8502Sjsg if (amdgpu_encoder->underscan_vborder != 0)
1451fb4d8502Sjsg amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
1452fb4d8502Sjsg else
1453fb4d8502Sjsg amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16; /* 1/32 of height + 16, e.g. 49 at 1080 */
1454fb4d8502Sjsg amdgpu_crtc->rmx_type = RMX_FULL;
1455fb4d8502Sjsg src_v = crtc->mode.vdisplay;
1456fb4d8502Sjsg dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
1457fb4d8502Sjsg src_h = crtc->mode.hdisplay;
1458fb4d8502Sjsg dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
1459fb4d8502Sjsg }
1460fb4d8502Sjsg }
1461fb4d8502Sjsg if (amdgpu_crtc->rmx_type != RMX_OFF) {
1462fb4d8502Sjsg fixed20_12 a, b;
146309f85244Sjsg 
/* scale factors are source/destination ratios in 20.12 fixed point, e.g. src_v = 1080, dst_v = 982 gives vsc ~= 1.0998 */
1464fb4d8502Sjsg a.full = dfixed_const(src_v);
1465fb4d8502Sjsg b.full = dfixed_const(dst_v);
1466fb4d8502Sjsg amdgpu_crtc->vsc.full = dfixed_div(a, b);
1467fb4d8502Sjsg a.full = dfixed_const(src_h);
1468fb4d8502Sjsg b.full = dfixed_const(dst_h);
1469fb4d8502Sjsg amdgpu_crtc->hsc.full = dfixed_div(a, b);
1470fb4d8502Sjsg } else {
1471fb4d8502Sjsg amdgpu_crtc->vsc.full = dfixed_const(1);
1472fb4d8502Sjsg amdgpu_crtc->hsc.full = dfixed_const(1);
1473fb4d8502Sjsg }
1474fb4d8502Sjsg return true;
1475fb4d8502Sjsg }
1476fb4d8502Sjsg 
1477fb4d8502Sjsg /*
1478fb4d8502Sjsg  * Retrieve the current video scanout position of a crtc on a given gpu, and
1479fb4d8502Sjsg  * an optional accurate timestamp of when the query happened.
1480fb4d8502Sjsg  *
1481fb4d8502Sjsg  * \param dev Device to query.
1482fb4d8502Sjsg  * \param pipe Crtc to query.
148309f85244Sjsg  * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
1484fb4d8502Sjsg  * For driver-internal use only, the following flags are also supported:
1485fb4d8502Sjsg  *
1486fb4d8502Sjsg  * USE_REAL_VBLANKSTART to use the real start of vblank instead
1487fb4d8502Sjsg  * of a fudged earlier start of vblank.
1488fb4d8502Sjsg  *
1489fb4d8502Sjsg  * GET_DISTANCE_TO_VBLANKSTART to return distance to the
1490fb4d8502Sjsg  * fudged earlier start of vblank in *vpos and the distance
1491fb4d8502Sjsg  * to true start of vblank in *hpos.
1492fb4d8502Sjsg  *
1493fb4d8502Sjsg  * \param *vpos Location where vertical scanout position should be stored.
1494fb4d8502Sjsg  * \param *hpos Location where horizontal scanout position should go.
1495fb4d8502Sjsg  * \param *stime Target location for timestamp taken immediately before
1496fb4d8502Sjsg  * scanout position query. Can be NULL to skip timestamp.
1497fb4d8502Sjsg  * \param *etime Target location for timestamp taken immediately after
1498fb4d8502Sjsg  * scanout position query. Can be NULL to skip timestamp.
1499fb4d8502Sjsg  *
1500fb4d8502Sjsg  * Returns vpos as a positive number while in active scanout area.
1501fb4d8502Sjsg  * Returns vpos as a negative number inside vblank, counting the number
1502fb4d8502Sjsg  * of scanlines to go until end of vblank, e.g., -1 means "one scanline
1503fb4d8502Sjsg  * until start of active scanout / end of vblank."
1504fb4d8502Sjsg  *
1505fb4d8502Sjsg  * \return Flags, or'ed together as follows:
1506fb4d8502Sjsg  *
1507fb4d8502Sjsg  * DRM_SCANOUTPOS_VALID = Query successful.
1508fb4d8502Sjsg  * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
1509fb4d8502Sjsg  * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
1510fb4d8502Sjsg  * this flag means that returned position may be offset by a constant but
1511fb4d8502Sjsg  * unknown small number of scanlines wrt. real scanout position.
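 *
 * Editorial example (hypothetical caller, not code from this file),
 * assuming dev, pipe and crtc are in scope: a path that only needs to
 * know whether the crtc is currently inside vblank could do
 *
 *   int vpos, hpos, stat;
 *   stat = amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, &vpos, &hpos,
 *                                             NULL, NULL, &crtc->hwmode);
 *   bool in_vbl = (stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 *                 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK);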
1512fb4d8502Sjsg * 1513fb4d8502Sjsg */ 1514fb4d8502Sjsg int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, 1515fb4d8502Sjsg unsigned int pipe, unsigned int flags, int *vpos, 1516fb4d8502Sjsg int *hpos, ktime_t *stime, ktime_t *etime, 1517fb4d8502Sjsg const struct drm_display_mode *mode) 1518fb4d8502Sjsg { 1519fb4d8502Sjsg u32 vbl = 0, position = 0; 1520fb4d8502Sjsg int vbl_start, vbl_end, vtotal, ret = 0; 1521fb4d8502Sjsg bool in_vbl = true; 1522fb4d8502Sjsg 1523ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(dev); 1524fb4d8502Sjsg 1525fb4d8502Sjsg /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 1526fb4d8502Sjsg 1527fb4d8502Sjsg /* Get optional system timestamp before query. */ 1528fb4d8502Sjsg if (stime) 1529fb4d8502Sjsg *stime = ktime_get(); 1530fb4d8502Sjsg 1531fb4d8502Sjsg if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0) 1532fb4d8502Sjsg ret |= DRM_SCANOUTPOS_VALID; 1533fb4d8502Sjsg 1534fb4d8502Sjsg /* Get optional system timestamp after query. */ 1535fb4d8502Sjsg if (etime) 1536fb4d8502Sjsg *etime = ktime_get(); 1537fb4d8502Sjsg 1538fb4d8502Sjsg /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 1539fb4d8502Sjsg 1540fb4d8502Sjsg /* Decode into vertical and horizontal scanout position. */ 1541fb4d8502Sjsg *vpos = position & 0x1fff; 1542fb4d8502Sjsg *hpos = (position >> 16) & 0x1fff; 1543fb4d8502Sjsg 1544fb4d8502Sjsg /* Valid vblank area boundaries from gpu retrieved? */ 1545fb4d8502Sjsg if (vbl > 0) { 1546fb4d8502Sjsg /* Yes: Decode. */ 1547fb4d8502Sjsg ret |= DRM_SCANOUTPOS_ACCURATE; 1548fb4d8502Sjsg vbl_start = vbl & 0x1fff; 1549fb4d8502Sjsg vbl_end = (vbl >> 16) & 0x1fff; 1550f005ef32Sjsg } else { 1551fb4d8502Sjsg /* No: Fake something reasonable which gives at least ok results. */ 1552fb4d8502Sjsg vbl_start = mode->crtc_vdisplay; 1553fb4d8502Sjsg vbl_end = 0; 1554fb4d8502Sjsg } 1555fb4d8502Sjsg 1556fb4d8502Sjsg /* Called from driver internal vblank counter query code? */ 1557fb4d8502Sjsg if (flags & GET_DISTANCE_TO_VBLANKSTART) { 1558fb4d8502Sjsg /* Caller wants distance from real vbl_start in *hpos */ 1559fb4d8502Sjsg *hpos = *vpos - vbl_start; 1560fb4d8502Sjsg } 1561fb4d8502Sjsg 1562fb4d8502Sjsg /* Fudge vblank to start a few scanlines earlier to handle the 1563fb4d8502Sjsg * problem that vblank irqs fire a few scanlines before start 1564fb4d8502Sjsg * of vblank. Some driver internal callers need the true vblank 1565fb4d8502Sjsg * start to be used and signal this via the USE_REAL_VBLANKSTART flag. 1566fb4d8502Sjsg * 1567fb4d8502Sjsg * The cause of the "early" vblank irq is that the irq is triggered 1568fb4d8502Sjsg * by the line buffer logic when the line buffer read position enters 1569fb4d8502Sjsg * the vblank, whereas our crtc scanout position naturally lags the 1570fb4d8502Sjsg * line buffer read position. 1571fb4d8502Sjsg */ 1572fb4d8502Sjsg if (!(flags & USE_REAL_VBLANKSTART)) 1573fb4d8502Sjsg vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; 1574fb4d8502Sjsg 1575fb4d8502Sjsg /* Test scanout position against vblank region. */ 1576fb4d8502Sjsg if ((*vpos < vbl_start) && (*vpos >= vbl_end)) 1577fb4d8502Sjsg in_vbl = false; 1578fb4d8502Sjsg 1579fb4d8502Sjsg /* In vblank? */ 1580fb4d8502Sjsg if (in_vbl) 1581fb4d8502Sjsg ret |= DRM_SCANOUTPOS_IN_VBLANK; 1582fb4d8502Sjsg 1583fb4d8502Sjsg /* Called from driver internal vblank counter query code? 
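 * If so, report *vpos relative to the fudged vbl_start computed above:
 * e.g. vbl_start = 1080 and a raw *vpos of 1078 yields *vpos = -2, i.e.
 * two scanlines before the (fudged) start of vblank.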
*/ 1584fb4d8502Sjsg if (flags & GET_DISTANCE_TO_VBLANKSTART) { 1585fb4d8502Sjsg /* Caller wants distance from fudged earlier vbl_start */ 1586fb4d8502Sjsg *vpos -= vbl_start; 1587fb4d8502Sjsg return ret; 1588fb4d8502Sjsg } 1589fb4d8502Sjsg 1590fb4d8502Sjsg /* Check if inside vblank area and apply corrective offsets: 1591fb4d8502Sjsg * vpos will then be >=0 in video scanout area, but negative 1592fb4d8502Sjsg * within vblank area, counting down the number of lines until 1593fb4d8502Sjsg * start of scanout. 1594fb4d8502Sjsg */ 1595fb4d8502Sjsg 1596fb4d8502Sjsg /* Inside "upper part" of vblank area? Apply corrective offset if so: */ 1597fb4d8502Sjsg if (in_vbl && (*vpos >= vbl_start)) { 1598fb4d8502Sjsg vtotal = mode->crtc_vtotal; 1599c349dbc7Sjsg 1600c349dbc7Sjsg /* With variable refresh rate displays the vpos can exceed 1601c349dbc7Sjsg * the vtotal value. Clamp to 0 to return -vbl_end instead 1602c349dbc7Sjsg * of guessing the remaining number of lines until scanout. 1603c349dbc7Sjsg */ 1604c349dbc7Sjsg *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0; 1605fb4d8502Sjsg } 1606fb4d8502Sjsg 1607fb4d8502Sjsg /* Correct for shifted end of vbl at vbl_end. */ 1608fb4d8502Sjsg *vpos = *vpos - vbl_end; 1609fb4d8502Sjsg 1610fb4d8502Sjsg return ret; 1611fb4d8502Sjsg } 1612fb4d8502Sjsg 1613fb4d8502Sjsg int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc) 1614fb4d8502Sjsg { 1615fb4d8502Sjsg if (crtc < 0 || crtc >= adev->mode_info.num_crtc) 1616fb4d8502Sjsg return AMDGPU_CRTC_IRQ_NONE; 1617fb4d8502Sjsg 1618fb4d8502Sjsg switch (crtc) { 1619fb4d8502Sjsg case 0: 1620fb4d8502Sjsg return AMDGPU_CRTC_IRQ_VBLANK1; 1621fb4d8502Sjsg case 1: 1622fb4d8502Sjsg return AMDGPU_CRTC_IRQ_VBLANK2; 1623fb4d8502Sjsg case 2: 1624fb4d8502Sjsg return AMDGPU_CRTC_IRQ_VBLANK3; 1625fb4d8502Sjsg case 3: 1626fb4d8502Sjsg return AMDGPU_CRTC_IRQ_VBLANK4; 1627fb4d8502Sjsg case 4: 1628fb4d8502Sjsg return AMDGPU_CRTC_IRQ_VBLANK5; 1629fb4d8502Sjsg case 5: 1630fb4d8502Sjsg return AMDGPU_CRTC_IRQ_VBLANK6; 1631fb4d8502Sjsg default: 1632fb4d8502Sjsg return AMDGPU_CRTC_IRQ_NONE; 1633fb4d8502Sjsg } 1634fb4d8502Sjsg } 1635c349dbc7Sjsg 1636c349dbc7Sjsg bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, 1637c349dbc7Sjsg bool in_vblank_irq, int *vpos, 1638c349dbc7Sjsg int *hpos, ktime_t *stime, ktime_t *etime, 1639c349dbc7Sjsg const struct drm_display_mode *mode) 1640c349dbc7Sjsg { 1641c349dbc7Sjsg struct drm_device *dev = crtc->dev; 1642c349dbc7Sjsg unsigned int pipe = crtc->index; 1643c349dbc7Sjsg 1644c349dbc7Sjsg return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, 1645c349dbc7Sjsg stime, etime, mode); 1646c349dbc7Sjsg } 16475ca02815Sjsg 16481bb76ff1Sjsg static bool 16491bb76ff1Sjsg amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) 16501bb76ff1Sjsg { 16511bb76ff1Sjsg struct drm_device *dev = adev_to_drm(adev); 16521bb76ff1Sjsg struct drm_fb_helper *fb_helper = dev->fb_helper; 16531bb76ff1Sjsg 16541bb76ff1Sjsg if (!fb_helper || !fb_helper->buffer) 16551bb76ff1Sjsg return false; 16561bb76ff1Sjsg 16571bb76ff1Sjsg if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj) 16581bb76ff1Sjsg return false; 16591bb76ff1Sjsg 16601bb76ff1Sjsg return true; 16611bb76ff1Sjsg } 16621bb76ff1Sjsg 16635ca02815Sjsg int amdgpu_display_suspend_helper(struct amdgpu_device *adev) 16645ca02815Sjsg { 16655ca02815Sjsg struct drm_device *dev = adev_to_drm(adev); 16665ca02815Sjsg struct drm_crtc *crtc; 16675ca02815Sjsg struct drm_connector *connector; 16685ca02815Sjsg struct drm_connector_list_iter 
iter; 16695ca02815Sjsg int r; 16705ca02815Sjsg 1671f005ef32Sjsg drm_kms_helper_poll_disable(dev); 1672f005ef32Sjsg 16735ca02815Sjsg /* turn off display hw */ 16745ca02815Sjsg drm_modeset_lock_all(dev); 16755ca02815Sjsg drm_connector_list_iter_begin(dev, &iter); 16765ca02815Sjsg drm_for_each_connector_iter(connector, &iter) 16775ca02815Sjsg drm_helper_connector_dpms(connector, 16785ca02815Sjsg DRM_MODE_DPMS_OFF); 16795ca02815Sjsg drm_connector_list_iter_end(&iter); 16805ca02815Sjsg drm_modeset_unlock_all(dev); 16815ca02815Sjsg /* unpin the front buffers and cursors */ 16825ca02815Sjsg list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 16835ca02815Sjsg struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 16845ca02815Sjsg struct drm_framebuffer *fb = crtc->primary->fb; 16855ca02815Sjsg struct amdgpu_bo *robj; 16865ca02815Sjsg 16875ca02815Sjsg if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 16885ca02815Sjsg struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 168909f85244Sjsg 16905ca02815Sjsg r = amdgpu_bo_reserve(aobj, true); 16915ca02815Sjsg if (r == 0) { 16925ca02815Sjsg amdgpu_bo_unpin(aobj); 16935ca02815Sjsg amdgpu_bo_unreserve(aobj); 16945ca02815Sjsg } 16955ca02815Sjsg } 16965ca02815Sjsg 169709f85244Sjsg if (!fb || !fb->obj[0]) 16985ca02815Sjsg continue; 169909f85244Sjsg 17005ca02815Sjsg robj = gem_to_amdgpu_bo(fb->obj[0]); 17011bb76ff1Sjsg if (!amdgpu_display_robj_is_fb(adev, robj)) { 17025ca02815Sjsg r = amdgpu_bo_reserve(robj, true); 17035ca02815Sjsg if (r == 0) { 17045ca02815Sjsg amdgpu_bo_unpin(robj); 17055ca02815Sjsg amdgpu_bo_unreserve(robj); 17065ca02815Sjsg } 17075ca02815Sjsg } 17085ca02815Sjsg } 17095ca02815Sjsg return 0; 17105ca02815Sjsg } 17115ca02815Sjsg 17125ca02815Sjsg int amdgpu_display_resume_helper(struct amdgpu_device *adev) 17135ca02815Sjsg { 17145ca02815Sjsg struct drm_device *dev = adev_to_drm(adev); 17155ca02815Sjsg struct drm_connector *connector; 17165ca02815Sjsg struct drm_connector_list_iter iter; 17175ca02815Sjsg struct drm_crtc *crtc; 17185ca02815Sjsg int r; 17195ca02815Sjsg 17205ca02815Sjsg /* pin cursors */ 17215ca02815Sjsg list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 17225ca02815Sjsg struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 17235ca02815Sjsg 17245ca02815Sjsg if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 17255ca02815Sjsg struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 172609f85244Sjsg 17275ca02815Sjsg r = amdgpu_bo_reserve(aobj, true); 17285ca02815Sjsg if (r == 0) { 17295ca02815Sjsg r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 17305ca02815Sjsg if (r != 0) 17315ca02815Sjsg dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); 17325ca02815Sjsg amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 17335ca02815Sjsg amdgpu_bo_unreserve(aobj); 17345ca02815Sjsg } 17355ca02815Sjsg } 17365ca02815Sjsg } 17375ca02815Sjsg 17385ca02815Sjsg drm_helper_resume_force_mode(dev); 17395ca02815Sjsg 17405ca02815Sjsg /* turn on display hw */ 17415ca02815Sjsg drm_modeset_lock_all(dev); 17425ca02815Sjsg 17435ca02815Sjsg drm_connector_list_iter_begin(dev, &iter); 17445ca02815Sjsg drm_for_each_connector_iter(connector, &iter) 17455ca02815Sjsg drm_helper_connector_dpms(connector, 17465ca02815Sjsg DRM_MODE_DPMS_ON); 17475ca02815Sjsg drm_connector_list_iter_end(&iter); 17485ca02815Sjsg 17495ca02815Sjsg drm_modeset_unlock_all(dev); 17505ca02815Sjsg 1751f005ef32Sjsg drm_kms_helper_poll_enable(dev); 1752f005ef32Sjsg 17535ca02815Sjsg return 0; 17545ca02815Sjsg } 
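
/*
 * Editorial usage sketch (hypothetical, not part of this file): the two
 * helpers above are meant to bracket a hardware suspend/resume or reset
 * sequence, roughly
 *
 *   amdgpu_display_suspend_helper(adev);  ...unpin cursors and scanout BOs
 *   ...suspend or reset the ASIC, then bring it back...
 *   amdgpu_display_resume_helper(adev);   ...re-pin cursors, force modeset
 *
 * The exact call sites depend on whether the legacy display path or the
 * atomic (DC) path drives the hardware.
 */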
17555ca02815Sjsg 