/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                   struct ttm_placement *placement)
{
        struct drm_printer p = drm_debug_printer(TTM_PFX);
        struct ttm_resource_manager *man;
        int i, mem_type;

        for (i = 0; i < placement->num_placement; i++) {
                mem_type = placement->placement[i].mem_type;
                drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
                           i, placement->placement[i].flags, mem_type);
                man = ttm_manager_type(bo->bdev, mem_type);
                ttm_resource_manager_debug(man, &p);
        }
}

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_global::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);

        if (bo->resource)
                ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
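
/*
 * Example (illustrative sketch, not part of this file): a driver that has
 * just used a BO and wants it to be a less attractive eviction victim.
 * Per the rules above, both the reservation and the LRU lock are held.
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      spin_lock(&bo->bdev->lru_lock);
 *      ttm_bo_move_to_lru_tail(bo);
 *      spin_unlock(&bo->bdev->lru_lock);
 *      dma_resv_unlock(bo->base.resv);
 */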

/**
 * ttm_bo_set_bulk_move - update BOs bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BOs bulk move object, making sure that resources are added and
 * removed as well. A bulk move allows moving many resources on the LRU at
 * once, resulting in much less overhead of maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a
 * BO. ttm_lru_bulk_move_tail() should be used to move all resources to the
 * tail of their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
                          struct ttm_lru_bulk_move *bulk)
{
        dma_resv_assert_held(bo->base.resv);

        if (bo->bulk_move == bulk)
                return;

        spin_lock(&bo->bdev->lru_lock);
        if (bo->resource)
                ttm_resource_del_bulk_move(bo->resource, bo);
        bo->bulk_move = bulk;
        if (bo->resource)
                ttm_resource_add_bulk_move(bo->resource, bo);
        spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
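
/*
 * Example (illustrative sketch, assuming a driver-side object that groups
 * several BOs): attach BOs to one bulk move and later bump the whole group
 * to the LRU tail in a single operation.
 *
 *      struct ttm_lru_bulk_move bulk;
 *
 *      ttm_lru_bulk_move_init(&bulk);
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      ttm_bo_set_bulk_move(bo, &bulk);
 *      dma_resv_unlock(bo->base.resv);
 *
 *      // ... after command submission, move the whole group at once:
 *      spin_lock(&bo->bdev->lru_lock);
 *      ttm_lru_bulk_move_tail(&bulk);
 *      spin_unlock(&bo->bdev->lru_lock);
 */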

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_resource *mem, bool evict,
                                  struct ttm_operation_ctx *ctx,
                                  struct ttm_place *hop)
{
        struct ttm_device *bdev = bo->bdev;
        bool old_use_tt, new_use_tt;
        int ret;

        old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
        new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

        ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (new_use_tt) {
                /* Zero init the new TTM structure if the old location should
                 * have used one as well.
                 */
                ret = ttm_tt_create(bo, old_use_tt);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
                        if (ret)
                                goto out_err;
                }
        }

        ret = dma_resv_reserve_fences(bo->base.resv, 1);
        if (ret)
                goto out_err;

        ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
        if (ret) {
                if (ret == -EMULTIHOP)
                        return ret;
                goto out_err;
        }

        ctx->bytes_moved += bo->base.size;
        return 0;

out_err:
        if (!old_use_tt)
                ttm_bo_tt_destroy(bo);

        return ret;
}

/*
 * Called with bo::reserved held.
 * Releases GPU memory type usage on destruction.
 * This is the place to put driver-specific hooks to release
 * driver-private resources.
 * The caller is expected to release the bo::reserved lock afterwards.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->bdev->funcs->delete_mem_notify)
                bo->bdev->funcs->delete_mem_notify(bo);

        ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->resource);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
        int r;

        if (bo->base.resv == &bo->base._resv)
                return 0;

        BUG_ON(!dma_resv_trylock(&bo->base._resv));

        r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
        dma_resv_unlock(&bo->base._resv);
        if (r)
                return r;

        if (bo->type != ttm_bo_type_sg) {
                /* This works because the BO is about to be destroyed and nobody
                 * references it any more. The only tricky case is the trylock on
                 * the resv object while holding the lru_lock.
                 */
                spin_lock(&bo->bdev->lru_lock);
                bo->base.resv = &bo->base._resv;
                spin_unlock(&bo->bdev->lru_lock);
        }

        return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
        struct dma_resv *resv = &bo->base._resv;
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                if (!fence->ops->signaled)
                        dma_fence_enable_sw_signaling(fence);
        }
        dma_resv_iter_end(&cursor);
}

/**
 * ttm_bo_cleanup_refs
 * If bo idle, remove from lru lists, and unref.
 * If not idle, block if possible.
 *
 * Must be called with lru_lock and reservation held; this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo: The buffer object to clean-up
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv: Unlock the reservation lock as well.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                               bool interruptible, bool no_wait_gpu,
                               bool unlock_resv)
{
        struct dma_resv *resv = &bo->base._resv;
        int ret;

        if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
                ret = 0;
        else
                ret = -EBUSY;

        if (ret && !no_wait_gpu) {
                long lret;

                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&bo->bdev->lru_lock);

                lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
                                             interruptible,
                                             30 * HZ);

                if (lret < 0)
                        return lret;
                else if (lret == 0)
                        return -EBUSY;

                spin_lock(&bo->bdev->lru_lock);
                if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
                        /*
                         * We raced, and lost, someone else holds the reservation now,
                         * and is probably busy in ttm_bo_cleanup_memtype_use.
                         *
                         * Even if it's not the case, because we finished waiting any
                         * delayed destruction would succeed, so just return success
                         * here.
                         */
                        spin_unlock(&bo->bdev->lru_lock);
                        return 0;
                }
                ret = 0;
        }

        if (ret) {
                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
                spin_unlock(&bo->bdev->lru_lock);
                return ret;
        }

        spin_unlock(&bo->bdev->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        if (unlock_resv)
                dma_resv_unlock(bo->base.resv);

        return 0;
}

/*
 * Block for the dma_resv object to become idle, lock the buffer and clean up
 * the resource and tt object.
 */
static void ttm_bo_delayed_delete(struct work_struct *work)
{
        struct ttm_buffer_object *bo;

        bo = container_of(work, typeof(*bo), delayed_delete);

        dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
                              MAX_SCHEDULE_TIMEOUT);
        dma_resv_lock(bo->base.resv, NULL);
        ttm_bo_cleanup_memtype_use(bo);
        dma_resv_unlock(bo->base.resv);
        ttm_bo_put(bo);
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
                container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_device *bdev = bo->bdev;
        int ret;

        WARN_ON_ONCE(bo->pin_count);
        WARN_ON_ONCE(bo->bulk_move);

        if (!bo->deleted) {
                ret = ttm_bo_individualize_resv(bo);
                if (ret) {
                        /* Last resort, if we fail to allocate memory for the
                         * fences block for the BO to become idle
                         */
                        dma_resv_wait_timeout(bo->base.resv,
                                              DMA_RESV_USAGE_BOOKKEEP, false,
                                              30 * HZ);
                }

                if (bo->bdev->funcs->release_notify)
                        bo->bdev->funcs->release_notify(bo);

                drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
                ttm_mem_io_free(bdev, bo->resource);

                if (!dma_resv_test_signaled(bo->base.resv,
                                            DMA_RESV_USAGE_BOOKKEEP) ||
                    (want_init_on_free() && (bo->ttm != NULL)) ||
                    !dma_resv_trylock(bo->base.resv)) {
                        /* The BO is not idle, resurrect it for delayed destroy */
                        ttm_bo_flush_all_fences(bo);
                        bo->deleted = true;

                        spin_lock(&bo->bdev->lru_lock);

                        /*
                         * Make pinned bos immediately available to
                         * shrinkers, now that they are queued for
                         * destruction.
                         *
                         * FIXME: QXL is triggering this. Can be removed when the
                         * driver is fixed.
                         */
                        if (bo->pin_count) {
                                bo->pin_count = 0;
                                ttm_resource_move_to_lru_tail(bo->resource);
                        }

                        kref_init(&bo->kref);
                        spin_unlock(&bo->bdev->lru_lock);

                        INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
                        queue_work(bdev->wq, &bo->delayed_delete);
                        return;
                }

                ttm_bo_cleanup_memtype_use(bo);
                dma_resv_unlock(bo->base.resv);
        }

        atomic_dec(&ttm_glob.bo_count);
        bo->destroy(bo);
}

/**
 * ttm_bo_put
 *
 * @bo: The buffer object.
 *
 * Unreference a buffer object.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
        kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);
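
/*
 * Example (illustrative sketch): the usual reference pattern around
 * ttm_bo_put(); ttm_bo_get() is the matching inline from ttm_bo.h.
 *
 *      ttm_bo_get(bo);         // take a reference while we use the BO
 *      // ... use the BO ...
 *      ttm_bo_put(bo);         // may invoke ttm_bo_release() on last ref
 */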

static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
                                     struct ttm_resource **mem,
                                     struct ttm_operation_ctx *ctx,
                                     struct ttm_place *hop)
{
        struct ttm_placement hop_placement;
        struct ttm_resource *hop_mem;
        int ret;

        hop_placement.num_placement = hop_placement.num_busy_placement = 1;
        hop_placement.placement = hop_placement.busy_placement = hop;

        /* find space in the bounce domain */
        ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
        if (ret)
                return ret;
        /* move to the bounce domain */
        ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
        if (ret) {
                ttm_resource_free(bo, &hop_mem);
                return ret;
        }
        return 0;
}

static int ttm_bo_evict(struct ttm_buffer_object *bo,
                        struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource *evict_mem;
        struct ttm_placement placement;
        struct ttm_place hop;
        int ret = 0;

        memset(&hop, 0, sizeof(hop));

        dma_resv_assert_held(bo->base.resv);

        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->funcs->evict_flags(bo, &placement);

        if (!placement.num_placement && !placement.num_busy_placement) {
                ret = ttm_bo_wait_ctx(bo, ctx);
                if (ret)
                        return ret;

                /*
                 * Since we've already synced, this frees backing store
                 * immediately.
                 */
                return ttm_bo_pipeline_gutting(bo);
        }

        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
                               bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        do {
                ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
                if (ret != -EMULTIHOP)
                        break;

                ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
        } while (!ret);

        if (ret) {
                ttm_resource_free(bo, &evict_mem);
                if (ret != -ERESTARTSYS && ret != -EINTR)
                        pr_err("Buffer eviction failed\n");
        }
out:
        return ret;
}

/**
 * ttm_bo_eviction_valuable
 *
 * @bo: The buffer object to evict
 * @place: the placement we need to make room for
 *
 * Check if it is valuable to evict the BO to make room for the given placement.
 */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place)
{
        struct ttm_resource *res = bo->resource;
        struct ttm_device *bdev = bo->bdev;

        dma_resv_assert_held(bo->base.resv);
        if (bo->resource->mem_type == TTM_PL_SYSTEM)
                return true;

        /* Don't evict this BO if it's outside of the
         * requested placement range
         */
        return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
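
/*
 * Example (illustrative sketch, hypothetical driver): drivers may wrap this
 * default in their own ttm_device_funcs.eviction_valuable callback, e.g. to
 * protect BOs they never want evicted, falling back to the default check.
 * The foo_* names and the never_evict flag are made up for illustration.
 *
 *      static bool foo_eviction_valuable(struct ttm_buffer_object *bo,
 *                                        const struct ttm_place *place)
 *      {
 *              struct foo_bo *fbo = to_foo_bo(bo);     // hypothetical wrapper
 *
 *              if (fbo->never_evict)                   // hypothetical flag
 *                      return false;
 *              return ttm_bo_eviction_valuable(bo, place);
 *      }
 */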

/*
 * Check whether the target bo is allowed to be evicted or swapped out,
 * covering two cases:
 *
 * a. if it shares the same reservation object with ctx->resv, assume the
 *    reservation objects are already locked, so don't lock again; return
 *    true directly when either the operation allows reserved eviction
 *    (ctx->allow_res_evict) or the target bo is already in the delayed
 *    free list;
 *
 * b. Otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
                                           struct ttm_operation_ctx *ctx,
                                           const struct ttm_place *place,
                                           bool *locked, bool *busy)
{
        bool ret = false;

        if (bo->pin_count) {
                *locked = false;
                if (busy)
                        *busy = false;
                return false;
        }

        if (bo->base.resv == ctx->resv) {
                dma_resv_assert_held(bo->base.resv);
                if (ctx->allow_res_evict)
                        ret = true;
                *locked = false;
                if (busy)
                        *busy = false;
        } else {
                ret = dma_resv_trylock(bo->base.resv);
                *locked = ret;
                if (busy)
                        *busy = !ret;
        }

        if (ret && place && (bo->resource->mem_type != place->mem_type ||
                !bo->bdev->funcs->eviction_valuable(bo, place))) {
                ret = false;
                if (*locked) {
                        dma_resv_unlock(bo->base.resv);
                        *locked = false;
                }
        }

        return ret;
}

/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
                                   struct ttm_operation_ctx *ctx,
                                   struct ww_acquire_ctx *ticket)
{
        int r;

        if (!busy_bo || !ticket)
                return -EBUSY;

        if (ctx->interruptible)
                r = dma_resv_lock_interruptible(busy_bo->base.resv,
                                                ticket);
        else
                r = dma_resv_lock(busy_bo->base.resv, ticket);

        /*
         * TODO: It would be better to keep the BO locked until allocation is at
         * least tried one more time, but that would mean a much larger rework
         * of TTM.
         */
        if (!r)
                dma_resv_unlock(busy_bo->base.resv);

        return r == -EDEADLK ? -EBUSY : r;
}

int ttm_mem_evict_first(struct ttm_device *bdev,
                        struct ttm_resource_manager *man,
                        const struct ttm_place *place,
                        struct ttm_operation_ctx *ctx,
                        struct ww_acquire_ctx *ticket)
{
        struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
        struct ttm_resource_cursor cursor;
        struct ttm_resource *res;
        bool locked = false;
        int ret;

        spin_lock(&bdev->lru_lock);
        ttm_resource_manager_for_each_res(man, &cursor, res) {
                bool busy;

                if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
                                                    &locked, &busy)) {
                        if (busy && !busy_bo && ticket !=
                            dma_resv_locking_ctx(res->bo->base.resv))
                                busy_bo = res->bo;
                        continue;
                }

                if (ttm_bo_get_unless_zero(res->bo)) {
                        bo = res->bo;
                        break;
                }
                if (locked)
                        dma_resv_unlock(res->bo->base.resv);
        }

        if (!bo) {
                if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
                        busy_bo = NULL;
                spin_unlock(&bdev->lru_lock);
                ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                if (busy_bo)
                        ttm_bo_put(busy_bo);
                return ret;
        }

        if (bo->deleted) {
                ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
                                          ctx->no_wait_gpu, locked);
                ttm_bo_put(bo);
                return ret;
        }

        spin_unlock(&bdev->lru_lock);

        ret = ttm_bo_evict(bo, ctx);
        if (locked)
                ttm_bo_unreserve(bo);
        else
                ttm_bo_move_to_lru_tail_unlocked(bo);

        ttm_bo_put(bo);
        return ret;
}

/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);
        WARN_ON_ONCE(!kref_read(&bo->kref));
        spin_lock(&bo->bdev->lru_lock);
        if (bo->resource)
                ttm_resource_del_bulk_move(bo->resource, bo);
        ++bo->pin_count;
        spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);

/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);
        WARN_ON_ONCE(!kref_read(&bo->kref));
        if (WARN_ON_ONCE(!bo->pin_count))
                return;

        spin_lock(&bo->bdev->lru_lock);
        --bo->pin_count;
        if (bo->resource)
                ttm_resource_add_bulk_move(bo->resource, bo);
        spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
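
/*
 * Example (illustrative sketch): pin a BO around a DMA that must not be
 * disturbed by eviction. Both calls require the reservation to be held.
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      ttm_bo_pin(bo);
 *      dma_resv_unlock(bo->base.resv);
 *
 *      // ... the BO cannot be evicted here ...
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      ttm_bo_unpin(bo);
 *      dma_resv_unlock(bo->base.resv);
 */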

/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_resource_manager *man,
                                 struct ttm_resource *mem,
                                 bool no_wait_gpu)
{
        struct dma_fence *fence;
        int ret;

        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);

        if (!fence)
                return 0;

        if (no_wait_gpu) {
                ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
                dma_fence_put(fence);
                return ret;
        }

        dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

        ret = dma_resv_reserve_fences(bo->base.resv, 1);
        dma_fence_put(fence);
        return ret;
}

/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
                                  struct ttm_resource **mem,
                                  struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;
        struct ww_acquire_ctx *ticket;
        int ret;

        man = ttm_manager_type(bdev, place->mem_type);
        ticket = dma_resv_locking_ctx(bo->base.resv);
        do {
                ret = ttm_resource_alloc(bo, place, mem);
                if (likely(!ret))
                        break;
                if (unlikely(ret != -ENOSPC))
                        return ret;
                ret = ttm_mem_evict_first(bdev, man, place, ctx,
                                          ticket);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);

        return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOSPC: Could not allocate space for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_resource **mem,
                     struct ttm_operation_ctx *ctx)
{
        struct ttm_device *bdev = bo->bdev;
        bool type_found = false;
        int i, ret;

        ret = dma_resv_reserve_fences(bo->base.resv, 1);
        if (unlikely(ret))
                return ret;

        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];
                struct ttm_resource_manager *man;

                man = ttm_manager_type(bdev, place->mem_type);
                if (!man || !ttm_resource_manager_used(man))
                        continue;

                type_found = true;
                ret = ttm_resource_alloc(bo, place, mem);
                if (ret == -ENOSPC)
                        continue;
                if (unlikely(ret))
                        goto error;

                ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
                if (unlikely(ret)) {
                        ttm_resource_free(bo, mem);
                        if (ret == -EBUSY)
                                continue;

                        goto error;
                }
                return 0;
        }

        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];
                struct ttm_resource_manager *man;

                man = ttm_manager_type(bdev, place->mem_type);
                if (!man || !ttm_resource_manager_used(man))
                        continue;

                type_found = true;
                ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
                if (likely(!ret))
                        return 0;

                if (ret && ret != -EBUSY)
                        goto error;
        }

        ret = -ENOSPC;
        if (!type_found) {
                pr_err(TTM_PFX "No compatible memory type found\n");
                ret = -EINVAL;
        }

error:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
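
/*
 * Example (illustrative sketch): build a placement that prefers VRAM but
 * can fall back to system memory under pressure, then ask for space.
 *
 *      struct ttm_place vram = { .mem_type = TTM_PL_VRAM };
 *      struct ttm_place sys = { .mem_type = TTM_PL_SYSTEM };
 *      struct ttm_placement placement = {
 *              .num_placement = 1,
 *              .placement = &vram,
 *              .num_busy_placement = 1,
 *              .busy_placement = &sys,
 *      };
 *      struct ttm_operation_ctx ctx = { .interruptible = true };
 *      struct ttm_resource *mem;
 *
 *      ret = ttm_bo_mem_space(bo, &placement, &mem, &ctx);
 */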

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement,
                              struct ttm_operation_ctx *ctx)
{
        struct ttm_resource *mem;
        struct ttm_place hop;
        int ret;

        dma_resv_assert_held(bo->base.resv);

        /*
         * Determine where to move the buffer.
         *
         * If the driver determines that the move needs an extra step,
         * it will return -EMULTIHOP; the buffer will then be moved to
         * the temporary stop and the driver will be called again to
         * make the second hop.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
        if (ret)
                return ret;
bounce:
        ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
        if (ret == -EMULTIHOP) {
                ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
                if (ret)
                        goto out;
                /* try and move to final place now. */
                goto bounce;
        }
out:
        if (ret)
                ttm_resource_free(bo, &mem);
        return ret;
}

/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @ctx: validation parameters.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    struct ttm_operation_ctx *ctx)
{
        int ret;

        dma_resv_assert_held(bo->base.resv);

        /*
         * Remove the backing store if no placement is given.
         */
        if (!placement->num_placement && !placement->num_busy_placement)
                return ttm_bo_pipeline_gutting(bo);

        /* Check whether we need to move buffer. */
        if (bo->resource && ttm_resource_compat(bo->resource, placement))
                return 0;

        /* Moving of pinned BOs is forbidden */
        if (bo->pin_count)
                return -EINVAL;

        ret = ttm_bo_move_buffer(bo, placement, ctx);
        /* For backward compatibility with userspace */
        if (ret == -ENOSPC)
                return -ENOMEM;
        if (ret)
                return ret;

        /*
         * We might need to add a TTM.
         */
        if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
                ret = ttm_tt_create(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
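
/*
 * Example (illustrative sketch): (re)validate a BO into a placement built
 * as in the ttm_bo_mem_space() example above, under the reservation lock.
 *
 *      struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *      ret = dma_resv_lock(bo->base.resv, NULL);
 *      if (!ret) {
 *              ret = ttm_bo_validate(bo, &placement, &ctx);
 *              dma_resv_unlock(bo->base.resv);
 *      }
 */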

/**
 * ttm_bo_init_reserved
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @ctx: TTM operation context for memory allocation.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function, enables driver-specific objects
 * derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
 * and it is the caller's responsibility to call ttm_bo_unreserve.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
                         enum ttm_bo_type type, struct ttm_placement *placement,
                         uint32_t alignment, struct ttm_operation_ctx *ctx,
                         struct sg_table *sg, struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
{
        int ret;

        kref_init(&bo->kref);
        bo->bdev = bdev;
        bo->type = type;
        bo->page_alignment = alignment;
        bo->destroy = destroy;
        bo->pin_count = 0;
        bo->sg = sg;
        bo->bulk_move = NULL;
        if (resv)
                bo->base.resv = resv;
        else
                bo->base.resv = &bo->base._resv;
        atomic_inc(&ttm_glob.bo_count);

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
                ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
                                         PFN_UP(bo->base.size));
                if (ret)
                        goto err_put;
        }

        /* passed reservation objects should already be locked,
         * since otherwise lockdep will be angered in radeon.
         */
        if (!resv)
                WARN_ON(!dma_resv_trylock(bo->base.resv));
        else
                dma_resv_assert_held(resv);

        ret = ttm_bo_validate(bo, placement, ctx);
        if (unlikely(ret))
                goto err_unlock;

        return 0;

err_unlock:
        if (!resv)
                dma_resv_unlock(bo->base.resv);

err_put:
        ttm_bo_put(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

/**
 * ttm_bo_init_validate
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
                         enum ttm_bo_type type, struct ttm_placement *placement,
                         uint32_t alignment, bool interruptible,
                         struct sg_table *sg, struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        int ret;

        ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
                                   sg, resv, destroy);
        if (ret)
                return ret;

        if (!resv)
                ttm_bo_unreserve(bo);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);
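
/*
 * Example (illustrative sketch, hypothetical driver wrapper): embed a
 * ttm_buffer_object in a larger driver structure and initialize it. The
 * foo_* names and page_align are made up for illustration; bo->tbo.base
 * (the GEM object, including its size) is assumed to have been initialized
 * beforehand, as drivers normally do.
 *
 *      struct foo_bo {
 *              struct ttm_buffer_object tbo;
 *              // driver-private fields
 *      };
 *
 *      static void foo_bo_destroy(struct ttm_buffer_object *tbo)
 *      {
 *              kfree(container_of(tbo, struct foo_bo, tbo));
 *      }
 *
 *      bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *      // ... initialize bo->tbo.base ...
 *      ret = ttm_bo_init_validate(bdev, &bo->tbo, ttm_bo_type_device,
 *                                 &placement, page_align, true,
 *                                 NULL, NULL, foo_bo_destroy);
 */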

/*
 * buffer object vm functions.
 */

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_device *bdev = bo->bdev;

#ifdef __linux__
        drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
#else
        if (drm_mm_node_allocated(&bo->base.vma_node.vm_node)) {
                struct vm_page *pg;
                bus_addr_t addr;
                paddr_t paddr;
                unsigned i;

                if (bo->resource && bo->resource->bus.is_iomem) {
                        addr = bo->resource->bus.offset;
                        paddr = bus_space_mmap(bdev->memt, addr, 0, 0, 0);
                        for (i = 0; i < PFN_UP(bo->base.size); i++) {
                                pg = PHYS_TO_VM_PAGE(paddr);
                                if (pg)
                                        pmap_page_protect(pg, PROT_NONE);
                                paddr += PAGE_SIZE;
                        }
                } else if (bo->ttm) {
                        for (i = 0; i < bo->ttm->num_pages; i++) {
                                pg = bo->ttm->pages[i];
                                if (pg)
                                        pmap_page_protect(pg, PROT_NONE);
                        }
                }
        }
#endif
        ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

/**
 * ttm_bo_wait_ctx - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @ctx: defines how to wait
 *
 * Waits for the buffer to be idle. Used timeout depends on the context.
 * Returns -EBUSY if wait timed out, -ERESTARTSYS if interrupted by a signal or
 * zero on success.
 */
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
        long ret;

        if (ctx->no_wait_gpu) {
                if (dma_resv_test_signaled(bo->base.resv,
                                           DMA_RESV_USAGE_BOOKKEEP))
                        return 0;
                else
                        return -EBUSY;
        }

        ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                    ctx->interruptible, 15 * HZ);
        if (unlikely(ret < 0))
                return ret;
        if (unlikely(ret == 0))
                return -EBUSY;
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
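
/*
 * Example (illustrative sketch): wait interruptibly for a BO to go idle
 * before touching its backing store.
 *
 *      struct ttm_operation_ctx ctx = {
 *              .interruptible = true,
 *              .no_wait_gpu = false,
 *      };
 *
 *      ret = ttm_bo_wait_ctx(bo, &ctx);
 *      if (ret)        // -EBUSY on timeout, -ERESTARTSYS on signal
 *              return ret;
 */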

int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                   gfp_t gfp_flags)
{
        struct ttm_place place;
        bool locked;
        long ret;

        /*
         * While the bo may already reside in SYSTEM placement, set
         * SYSTEM as new placement to cover also the move further below.
         * The driver may use the fact that we're moving from SYSTEM
         * as an indication that we're about to swap out.
         */
        memset(&place, 0, sizeof(place));
        place.mem_type = bo->resource->mem_type;
        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
                return -EBUSY;

        if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
            bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
            bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
            !ttm_bo_get_unless_zero(bo)) {
                if (locked)
                        dma_resv_unlock(bo->base.resv);
                return -EBUSY;
        }

        if (bo->deleted) {
                ret = ttm_bo_cleanup_refs(bo, false, false, locked);
                ttm_bo_put(bo);
                return ret == -EBUSY ? -ENOSPC : ret;
        }

        /* TODO: Cleanup the locking */
        spin_unlock(&bo->bdev->lru_lock);

        /*
         * Move to system cached
         */
        if (bo->resource->mem_type != TTM_PL_SYSTEM) {
                struct ttm_resource *evict_mem;
                struct ttm_place hop;

                memset(&hop, 0, sizeof(hop));
                place.mem_type = TTM_PL_SYSTEM;
                ret = ttm_resource_alloc(bo, &place, &evict_mem);
                if (unlikely(ret))
                        goto out;

                ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
                if (unlikely(ret != 0)) {
                        WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
                        ttm_resource_free(bo, &evict_mem);
                        goto out;
                }
        }

        /*
         * Make sure BO is idle.
         */
        ret = ttm_bo_wait_ctx(bo, ctx);
        if (unlikely(ret != 0))
                goto out;

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */
        if (bo->bdev->funcs->swap_notify)
                bo->bdev->funcs->swap_notify(bo);

        if (ttm_tt_is_populated(bo->ttm))
                ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:

        /*
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */
        if (locked)
                dma_resv_unlock(bo->base.resv);
        ttm_bo_put(bo);
        return ret == -EBUSY ? -ENOSPC : ret;
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
        if (bo->ttm == NULL)
                return;

        ttm_tt_unpopulate(bo->bdev, bo->ttm);
        ttm_tt_destroy(bo->bdev, bo->ttm);
        bo->ttm = NULL;
}