/*	$NetBSD: qxl_release.c,v 1.2 2018/08/27 04:58:35 riastradh Exp $	*/

/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qxl_release.c,v 1.2 2018/08/27 04:58:35 riastradh Exp $");

#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/fence.h>

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct fence *fence)
{
	return "release";
}

static bool qxl_nop_signaling(struct fence *fence)
{
	/* fences are always automatically signaled, so just pretend we did this.. */
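	/*
	 * Nothing to arm here: releases are signaled from qxl_release_free()
	 * once the device is done with them, never from an interrupt path,
	 * so reporting success unconditionally is safe.
	 */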
	return true;
}

static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (fence_is_signaled(fence))
			goto signaled;
	}

	if (fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			FENCE_WARN(fence, "failed to wait on release %d "
				   "after spincount %d\n",
				   fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.enable_signaling = qxl_nop_signaling,
	.wait = qxl_fence_wait,
};

static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return 0;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		fence_signal(&release->base);
		fence_put(&release->base);
	} else {
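		/*
		 * base.ops is still NULL: qxl_release_alloc() clears it and it
		 * is only set once qxl_release_fence_buffer_objects() calls
		 * fence_init(), so there is no fence to signal here; just drop
		 * the bo list and the release itself.
		 */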
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	int ret;
	/* pin release bo's; they are too messy to evict */
	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
			    QXL_GEM_DOMAIN_VRAM, NULL,
			    bo);
	return ret;
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.shared = false;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	ret = reservation_object_reserve_shared(bo->tbo.resv);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object on the release it's the release itself;
	   since these objects are pinned, no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object on the release it's the release itself;
	   since these objects are pinned, no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}


int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = to_qxl_bo(entry->tv.bo);

		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

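/*
 * Suballocate a release out of the current release bo for this command type.
 * A 4096-byte bo holds releases_per_bo[] slots of release_size_per_bo[] bytes
 * (16 x 256 for drawables/cursors, 32 x 128 for surface commands), so e.g.
 * the fifth drawable release in a bo lands at offset 4 * 256 = 1024; a full
 * bo is unreferenced and replaced with a freshly allocated one.
 */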
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_SIZE);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if only one object on the release it's the release itself;
	   since these objects are pinned, no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

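	/*
	 * Recover the qxl_device through the first bo on the list; every bo
	 * on a release is created against the same ttm_bo_device.
	 */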
	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		   release->id | 0xf0000000, release->base.seqno);
	trace_fence_emit(&release->base);

	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		reservation_object_add_shared_fence(bo->resv, &release->base);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}