1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * 26 */ 27 28 #include <linux/bitops.h> 29 #include <drm/drmP.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include <linux/highmem.h> 33 34 /** @file i915_gem_tiling.c 35 * 36 * Support for managing tiling state of buffer objects. 37 * 38 * The idea behind tiling is to increase cache hit rates by rearranging 39 * pixel data so that a group of pixel accesses are in the same cacheline. 40 * Performance improvement from doing this on the back/depth buffer are on 41 * the order of 30%. 42 * 43 * Intel architectures make this somewhat more complicated, though, by 44 * adjustments made to addressing of data when the memory is in interleaved 45 * mode (matched pairs of DIMMS) to improve memory bandwidth. 46 * For interleaved memory, the CPU sends every sequential 64 bytes 47 * to an alternate memory channel so it can get the bandwidth from both. 48 * 49 * The GPU also rearranges its accesses for increased bandwidth to interleaved 50 * memory, and it matches what the CPU does for non-tiled. However, when tiled 51 * it does it a little differently, since one walks addresses not just in the 52 * X direction but also Y. So, along with alternating channels when bit 53 * 6 of the address flips, it also alternates when other bits flip -- Bits 9 54 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) 55 * are common to both the 915 and 965-class hardware. 56 * 57 * The CPU also sometimes XORs in higher bits as well, to improve 58 * bandwidth doing strided access like we do so frequently in graphics. This 59 * is called "Channel XOR Randomization" in the MCH documentation. The result 60 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address 61 * decode. 62 * 63 * All of this bit 6 XORing has an effect on our memory management, 64 * as we need to make sure that the 3d driver can correctly address object 65 * contents. 66 * 67 * If we don't have interleaved memory, all tiling is safe and no swizzling is 68 * required. 69 * 70 * When bit 17 is XORed in, we simply refuse to tile at all. Bit 71 * 17 is not just a page offset, so as we page an objet out and back in, 72 * individual pages in it will have different bit 17 addresses, resulting in 73 * each 64 bytes being swapped with its neighbor! 74 * 75 * Otherwise, if interleaved, we have to tell the 3d driver what the address 76 * swizzling it needs to do is, since it's writing with the CPU to the pages 77 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the 78 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling 79 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order 80 * to match what the GPU expects. 81 */ 82 83 /** 84 * Detects bit 6 swizzling of address lookup between IGD access and CPU 85 * access through main memory. 86 */ 87 void 88 i915_gem_detect_bit_6_swizzle(struct drm_device *dev) 89 { 90 drm_i915_private_t *dev_priv = dev->dev_private; 91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 94 if (IS_VALLEYVIEW(dev)) { 95 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 96 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 97 } else if (INTEL_INFO(dev)->gen >= 6) { 98 uint32_t dimm_c0, dimm_c1; 99 dimm_c0 = I915_READ(MAD_DIMM_C0); 100 dimm_c1 = I915_READ(MAD_DIMM_C1); 101 dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; 102 dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; 103 /* Enable swizzling when the channels are populated with 104 * identically sized dimms. We don't need to check the 3rd 105 * channel because no cpu with gpu attached ships in that 106 * configuration. Also, swizzling only makes sense for 2 107 * channels anyway. */ 108 if (dimm_c0 == dimm_c1) { 109 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 110 swizzle_y = I915_BIT_6_SWIZZLE_9; 111 } else { 112 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 113 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 114 } 115 } else if (IS_GEN5(dev)) { 116 /* On Ironlake whatever DRAM config, GPU always do 117 * same swizzling setup. 118 */ 119 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 120 swizzle_y = I915_BIT_6_SWIZZLE_9; 121 } else if (IS_GEN2(dev)) { 122 /* As far as we know, the 865 doesn't have these bit 6 123 * swizzling issues. 124 */ 125 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 126 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 127 } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) { 128 uint32_t dcc; 129 130 /* On 9xx chipsets, channel interleave by the CPU is 131 * determined by DCC. For single-channel, neither the CPU 132 * nor the GPU do swizzling. For dual channel interleaved, 133 * the GPU's interleave is bit 9 and 10 for X tiled, and bit 134 * 9 for Y tiled. The CPU's interleave is independent, and 135 * can be based on either bit 11 (haven't seen this yet) or 136 * bit 17 (common). 137 */ 138 dcc = I915_READ(DCC); 139 switch (dcc & DCC_ADDRESSING_MODE_MASK) { 140 case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: 141 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: 142 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 143 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 144 break; 145 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: 146 if (dcc & DCC_CHANNEL_XOR_DISABLE) { 147 /* This is the base swizzling by the GPU for 148 * tiled buffers. 149 */ 150 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 151 swizzle_y = I915_BIT_6_SWIZZLE_9; 152 } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { 153 /* Bit 11 swizzling by the CPU in addition. */ 154 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 155 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 156 } else { 157 /* Bit 17 swizzling by the CPU in addition. */ 158 swizzle_x = I915_BIT_6_SWIZZLE_9_10_17; 159 swizzle_y = I915_BIT_6_SWIZZLE_9_17; 160 } 161 break; 162 } 163 if (dcc == 0xffffffff) { 164 DRM_ERROR("Couldn't read from MCHBAR. " 165 "Disabling tiling.\n"); 166 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 167 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 168 } 169 } else { 170 /* The 965, G33, and newer, have a very flexible memory 171 * configuration. It will enable dual-channel mode 172 * (interleaving) on as much memory as it can, and the GPU 173 * will additionally sometimes enable different bit 6 174 * swizzling for tiled objects from the CPU. 175 * 176 * Here's what I found on the G965: 177 * slot fill memory size swizzling 178 * 0A 0B 1A 1B 1-ch 2-ch 179 * 512 0 0 0 512 0 O 180 * 512 0 512 0 16 1008 X 181 * 512 0 0 512 16 1008 X 182 * 0 512 0 512 16 1008 X 183 * 1024 1024 1024 0 2048 1024 O 184 * 185 * We could probably detect this based on either the DRB 186 * matching, which was the case for the swizzling required in 187 * the table above, or from the 1-ch value being less than 188 * the minimum size of a rank. 189 */ 190 if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { 191 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 192 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 193 } else { 194 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 195 swizzle_y = I915_BIT_6_SWIZZLE_9; 196 } 197 } 198 199 dev_priv->mm.bit_6_swizzle_x = swizzle_x; 200 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 201 } 202 203 /* Check pitch constriants for all chips & tiling formats */ 204 static bool 205 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 206 { 207 int tile_width; 208 209 /* Linear is always fine */ 210 if (tiling_mode == I915_TILING_NONE) 211 return true; 212 213 if (IS_GEN2(dev) || 214 (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) 215 tile_width = 128; 216 else 217 tile_width = 512; 218 219 /* check maximum stride & object size */ 220 if (INTEL_INFO(dev)->gen >= 4) { 221 /* i965 stores the end address of the gtt mapping in the fence 222 * reg, so dont bother to check the size */ 223 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) 224 return false; 225 } else { 226 if (stride > 8192) 227 return false; 228 229 if (IS_GEN3(dev)) { 230 if (size > I830_FENCE_MAX_SIZE_VAL << 20) 231 return false; 232 } else { 233 if (size > I830_FENCE_MAX_SIZE_VAL << 19) 234 return false; 235 } 236 } 237 238 /* 965+ just needs multiples of tile width */ 239 if (INTEL_INFO(dev)->gen >= 4) { 240 if (stride & (tile_width - 1)) 241 return false; 242 return true; 243 } 244 245 /* Pre-965 needs power of two tile widths */ 246 if (stride < tile_width) 247 return false; 248 249 if (stride & (stride - 1)) 250 return false; 251 252 return true; 253 } 254 255 /* Is the current GTT allocation valid for the change in tiling? */ 256 static bool 257 i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) 258 { 259 u32 size; 260 261 if (tiling_mode == I915_TILING_NONE) 262 return true; 263 264 if (INTEL_INFO(obj->base.dev)->gen >= 4) 265 return true; 266 267 if (INTEL_INFO(obj->base.dev)->gen == 3) { 268 if (obj->gtt_offset & ~I915_FENCE_START_MASK) 269 return false; 270 } else { 271 if (obj->gtt_offset & ~I830_FENCE_START_MASK) 272 return false; 273 } 274 275 size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); 276 if (obj->gtt_space->size != size) 277 return false; 278 279 if (obj->gtt_offset & (size - 1)) 280 return false; 281 282 return true; 283 } 284 285 /** 286 * Sets the tiling mode of an object, returning the required swizzling of 287 * bit 6 of addresses in the object. 288 */ 289 int 290 i915_gem_set_tiling(struct drm_device *dev, void *data, 291 struct drm_file *file) 292 { 293 struct drm_i915_gem_set_tiling *args = data; 294 drm_i915_private_t *dev_priv = dev->dev_private; 295 struct drm_i915_gem_object *obj; 296 int ret = 0; 297 298 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 299 if (&obj->base == NULL) 300 return -ENOENT; 301 302 if (!i915_tiling_ok(dev, 303 args->stride, obj->base.size, args->tiling_mode)) { 304 drm_gem_object_unreference_unlocked(&obj->base); 305 return -EINVAL; 306 } 307 308 if (obj->pin_count) { 309 drm_gem_object_unreference_unlocked(&obj->base); 310 return -EBUSY; 311 } 312 313 if (args->tiling_mode == I915_TILING_NONE) { 314 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 315 args->stride = 0; 316 } else { 317 if (args->tiling_mode == I915_TILING_X) 318 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 319 else 320 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; 321 322 /* Hide bit 17 swizzling from the user. This prevents old Mesa 323 * from aborting the application on sw fallbacks to bit 17, 324 * and we use the pread/pwrite bit17 paths to swizzle for it. 325 * If there was a user that was relying on the swizzle 326 * information for drm_intel_bo_map()ed reads/writes this would 327 * break it, but we don't have any of those. 328 */ 329 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) 330 args->swizzle_mode = I915_BIT_6_SWIZZLE_9; 331 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 332 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; 333 334 /* If we can't handle the swizzling, make it untiled. */ 335 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { 336 args->tiling_mode = I915_TILING_NONE; 337 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 338 args->stride = 0; 339 } 340 } 341 342 mutex_lock(&dev->struct_mutex); 343 if (args->tiling_mode != obj->tiling_mode || 344 args->stride != obj->stride) { 345 /* We need to rebind the object if its current allocation 346 * no longer meets the alignment restrictions for its new 347 * tiling mode. Otherwise we can just leave it alone, but 348 * need to ensure that any fence register is updated before 349 * the next fenced (either through the GTT or by the BLT unit 350 * on older GPUs) access. 351 * 352 * After updating the tiling parameters, we then flag whether 353 * we need to update an associated fence register. Note this 354 * has to also include the unfenced register the GPU uses 355 * whilst executing a fenced command for an untiled object. 356 */ 357 358 obj->map_and_fenceable = 359 obj->gtt_space == NULL || 360 (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && 361 i915_gem_object_fence_ok(obj, args->tiling_mode)); 362 363 /* Rebind if we need a change of alignment */ 364 if (!obj->map_and_fenceable) { 365 u32 unfenced_alignment = 366 i915_gem_get_gtt_alignment(dev, obj->base.size, 367 args->tiling_mode, 368 false); 369 if (obj->gtt_offset & (unfenced_alignment - 1)) 370 ret = i915_gem_object_unbind(obj); 371 } 372 373 if (ret == 0) { 374 obj->fence_dirty = 375 obj->fenced_gpu_access || 376 obj->fence_reg != I915_FENCE_REG_NONE; 377 378 obj->tiling_mode = args->tiling_mode; 379 obj->stride = args->stride; 380 381 /* Force the fence to be reacquired for GTT access */ 382 i915_gem_release_mmap(obj); 383 } 384 } 385 /* we have to maintain this existing ABI... */ 386 args->stride = obj->stride; 387 args->tiling_mode = obj->tiling_mode; 388 389 /* Try to preallocate memory required to save swizzling on put-pages */ 390 if (i915_gem_object_needs_bit17_swizzle(obj)) { 391 if (obj->bit_17 == NULL) { 392 obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * 393 sizeof(long), M_DRM, M_WAITOK); 394 } 395 } else { 396 drm_free(obj->bit_17, M_DRM); 397 obj->bit_17 = NULL; 398 } 399 400 drm_gem_object_unreference(&obj->base); 401 mutex_unlock(&dev->struct_mutex); 402 403 return ret; 404 } 405 406 /** 407 * Returns the current tiling mode and required bit 6 swizzling for the object. 408 */ 409 int 410 i915_gem_get_tiling(struct drm_device *dev, void *data, 411 struct drm_file *file) 412 { 413 struct drm_i915_gem_get_tiling *args = data; 414 drm_i915_private_t *dev_priv = dev->dev_private; 415 struct drm_i915_gem_object *obj; 416 417 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 418 if (&obj->base == NULL) 419 return -ENOENT; 420 421 mutex_lock(&dev->struct_mutex); 422 423 args->tiling_mode = obj->tiling_mode; 424 switch (obj->tiling_mode) { 425 case I915_TILING_X: 426 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 427 break; 428 case I915_TILING_Y: 429 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; 430 break; 431 case I915_TILING_NONE: 432 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 433 break; 434 default: 435 DRM_ERROR("unknown tiling mode\n"); 436 } 437 438 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ 439 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) 440 args->swizzle_mode = I915_BIT_6_SWIZZLE_9; 441 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 442 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; 443 444 drm_gem_object_unreference(&obj->base); 445 mutex_unlock(&dev->struct_mutex); 446 447 return 0; 448 } 449 450 /** 451 * Swap every 64 bytes of this page around, to account for it having a new 452 * bit 17 of its physical address and therefore being interpreted differently 453 * by the GPU. 454 */ 455 static void 456 i915_gem_swizzle_page(struct vm_page *page) 457 { 458 char temp[64]; 459 char *vaddr; 460 int i; 461 462 vaddr = kmap(page); 463 464 for (i = 0; i < PAGE_SIZE; i += 128) { 465 memcpy(temp, &vaddr[i], 64); 466 memcpy(&vaddr[i], &vaddr[i + 64], 64); 467 memcpy(&vaddr[i + 64], temp, 64); 468 } 469 470 kunmap(page); 471 } 472 473 void 474 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 475 { 476 int page_count = obj->base.size >> PAGE_SHIFT; 477 int i; 478 479 if (obj->bit_17 == NULL) 480 return; 481 482 for (i = 0; i < page_count; i++) { 483 char new_bit_17 = VM_PAGE_TO_PHYS(obj->pages[i]) >> 17; 484 if ((new_bit_17 & 0x1) != 485 (test_bit(i, obj->bit_17) != 0)) { 486 i915_gem_swizzle_page(obj->pages[i]); 487 vm_page_dirty(obj->pages[i]); 488 } 489 } 490 } 491 492 void 493 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 494 { 495 int page_count = obj->base.size >> PAGE_SHIFT; 496 int i; 497 498 if (obj->bit_17 == NULL) { 499 obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * 500 sizeof(long), M_DRM, M_WAITOK); 501 if (obj->bit_17 == NULL) { 502 DRM_ERROR("Failed to allocate memory for bit 17 " 503 "record\n"); 504 return; 505 } 506 } 507 508 for (i = 0; i < page_count; i++) { 509 if (VM_PAGE_TO_PHYS(obj->pages[i]) & (1 << 17)) 510 __set_bit(i, obj->bit_17); 511 else 512 __clear_bit(i, obj->bit_17); 513 } 514 } 515