/*	$NetBSD: amdgpu_object.c,v 1.7 2021/12/19 12:33:19 riastradh Exp $	*/

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_object.c,v 1.7 2021/12/19 12:33:19 riastradh Exp $");

#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include <linux/nbsd-namespace.h>

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */

/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 *
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to the requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
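
/*
 * Usage sketch (hypothetical caller context, not part of this file): the BO
 * comes back pinned *and* reserved, so the caller can initialize it before
 * anyone else can touch it, and must drop the reservation itself.  "adev"
 * stands in for the caller's device.
 *
 *	struct amdgpu_bo *bo = NULL;	(must be NULL to allocate a new BO)
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				      &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	memset(cpu_addr, 0, PAGE_SIZE);	(fill while still reserved)
 *	amdgpu_bo_unreserve(bo);
 */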

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
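
/*
 * Usage sketch (hypothetical): unlike amdgpu_bo_create_reserved(), the BO is
 * returned unreserved, so it is ready to use immediately; tear it down with
 * amdgpu_bo_free_kernel() further below.  "adev" and the scratch names are
 * made up for illustration.
 *
 *	struct amdgpu_bo *scratch = NULL;
 *	u64 scratch_gpu;
 *	void *scratch_cpu;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &scratch,
 *				    &scratch_gpu, &scratch_cpu);
 *	if (r)
 *		return r;
 *	...use scratch_cpu / scratch_gpu...
 *	amdgpu_bo_free_kernel(&scratch, &scratch_gpu, &scratch_cpu);
 */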

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, cpu_addr);
	if (r)
		return r;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.mem, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}
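
/*
 * Usage sketch (hypothetical): this is useful for claiming a fixed VRAM
 * range, e.g. memory a pre-OS console is still scanning out, so that TTM
 * will not hand that range to anyone else.  The offset and size here are
 * invented for illustration.
 *
 *	struct amdgpu_bo *stolen = NULL;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel_at(adev, 0, 8 << 20,
 *				       AMDGPU_GEM_DOMAIN_VRAM,
 *				       &stolen, NULL);
 */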

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the requested BO size fits within the requested domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
					  unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}


	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %"PRIu64"\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		.resv = bp->resv,
		.flags = bp->type != ttm_bo_type_kernel ?
			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			dma_resv_unlock((*bo_ptr)->tbo.base.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
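
/*
 * Usage sketch (hypothetical caller): parameters travel in an
 * &amdgpu_bo_param, mirroring how amdgpu_bo_create_shadow() above fills one
 * in; unset fields must be zeroed.  "adev" and "size" are caller context.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */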

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow BOs.  It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
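
/*
 * Usage sketch (hypothetical, with the BO already reserved by the caller):
 * the mapping is cached in bo->kmap, so a second call is cheap, and
 * amdgpu_bo_kunmap() below releases it.  "data" and "len" are invented.
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (r == 0) {
 *		memcpy(ptr, data, len);
 *		amdgpu_bo_kunmap(bo);
 *	}
 */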

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}
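
/*
 * Usage sketch (hypothetical, BO reserved by the caller): bound the pin to
 * the CPU-visible part of VRAM, e.g. for a buffer the CPU must be able to
 * reach; amdgpu_bo_pin() below is the unrestricted form, and every
 * successful pin should eventually be balanced by an amdgpu_bo_unpin().
 *
 *	r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM,
 *				     0, adev->gmc.visible_vram_size);
 */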
974efa246c0Sriastradh 
97541ec0267Sriastradh /**
97641ec0267Sriastradh  * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
97741ec0267Sriastradh  * @bo: &amdgpu_bo buffer object to be pinned
97841ec0267Sriastradh  * @domain: domain to be pinned to
97941ec0267Sriastradh  *
98041ec0267Sriastradh  * A simple wrapper to amdgpu_bo_pin_restricted().
98141ec0267Sriastradh  * Provides a simpler API for buffers that do not have any strict restrictions
98241ec0267Sriastradh  * on where a buffer must be located.
98341ec0267Sriastradh  *
98441ec0267Sriastradh  * Returns:
98541ec0267Sriastradh  * 0 for success or a negative error code on failure.
98641ec0267Sriastradh  */
amdgpu_bo_pin(struct amdgpu_bo * bo,u32 domain)98741ec0267Sriastradh int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
988efa246c0Sriastradh {
98941ec0267Sriastradh 	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
990efa246c0Sriastradh }
991efa246c0Sriastradh 
99241ec0267Sriastradh /**
99341ec0267Sriastradh  * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
99441ec0267Sriastradh  * @bo: &amdgpu_bo buffer object to be unpinned
99541ec0267Sriastradh  *
99641ec0267Sriastradh  * Decreases the pin_count, and clears the flags if pin_count reaches 0.
99741ec0267Sriastradh  * Changes placement and pin size accordingly.
99841ec0267Sriastradh  *
99941ec0267Sriastradh  * Returns:
100041ec0267Sriastradh  * 0 for success or a negative error code on failure.
100141ec0267Sriastradh  */
amdgpu_bo_unpin(struct amdgpu_bo * bo)1002efa246c0Sriastradh int amdgpu_bo_unpin(struct amdgpu_bo *bo)
1003efa246c0Sriastradh {
100441ec0267Sriastradh 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
100541ec0267Sriastradh 	struct ttm_operation_ctx ctx = { false, false };
1006efa246c0Sriastradh 	int r, i;
1007efa246c0Sriastradh 
100841ec0267Sriastradh 	if (WARN_ON_ONCE(!bo->pin_count)) {
100941ec0267Sriastradh 		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
1010efa246c0Sriastradh 		return 0;
1011efa246c0Sriastradh 	}
1012efa246c0Sriastradh 	bo->pin_count--;
1013efa246c0Sriastradh 	if (bo->pin_count)
1014efa246c0Sriastradh 		return 0;
101541ec0267Sriastradh 
101641ec0267Sriastradh 	amdgpu_bo_subtract_pin_size(bo);
101741ec0267Sriastradh 
1018efa246c0Sriastradh 	for (i = 0; i < bo->placement.num_placement; i++) {
1019efa246c0Sriastradh 		bo->placements[i].lpfn = 0;
1020efa246c0Sriastradh 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
1021efa246c0Sriastradh 	}
102241ec0267Sriastradh 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
102341ec0267Sriastradh 	if (unlikely(r))
102441ec0267Sriastradh 		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
102541ec0267Sriastradh 
1026efa246c0Sriastradh 	return r;
1027efa246c0Sriastradh }
1028efa246c0Sriastradh 
102941ec0267Sriastradh /**
103041ec0267Sriastradh  * amdgpu_bo_evict_vram - evict VRAM buffers
103141ec0267Sriastradh  * @adev: amdgpu device object
103241ec0267Sriastradh  *
103341ec0267Sriastradh  * Evicts all VRAM buffers on the lru list of the memory type.
103441ec0267Sriastradh  * Mainly used for evicting vram at suspend time.
103541ec0267Sriastradh  *
103641ec0267Sriastradh  * Returns:
103741ec0267Sriastradh  * 0 for success or a negative error code on failure.
103841ec0267Sriastradh  */
amdgpu_bo_evict_vram(struct amdgpu_device * adev)1039efa246c0Sriastradh int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
1040efa246c0Sriastradh {
1041efa246c0Sriastradh 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
104241ec0267Sriastradh #ifndef CONFIG_HIBERNATION
104341ec0267Sriastradh 	if (adev->flags & AMD_IS_APU) {
1044efa246c0Sriastradh 		/* Useless to evict on IGP chips */
1045efa246c0Sriastradh 		return 0;
1046efa246c0Sriastradh 	}
104741ec0267Sriastradh #endif
1048efa246c0Sriastradh 	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
1049efa246c0Sriastradh }
1050efa246c0Sriastradh 
105141ec0267Sriastradh static const char *amdgpu_vram_names[] = {
105241ec0267Sriastradh 	"UNKNOWN",
105341ec0267Sriastradh 	"GDDR1",
105441ec0267Sriastradh 	"DDR2",
105541ec0267Sriastradh 	"GDDR3",
105641ec0267Sriastradh 	"GDDR4",
105741ec0267Sriastradh 	"GDDR5",
105841ec0267Sriastradh 	"HBM",
105941ec0267Sriastradh 	"DDR3",
106041ec0267Sriastradh 	"DDR4",
106141ec0267Sriastradh 	"GDDR6",
106241ec0267Sriastradh };
1063efa246c0Sriastradh 
106441ec0267Sriastradh /**
106541ec0267Sriastradh  * amdgpu_bo_init - initialize memory manager
106641ec0267Sriastradh  * @adev: amdgpu device object
106741ec0267Sriastradh  *
106841ec0267Sriastradh  * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
106941ec0267Sriastradh  *
107041ec0267Sriastradh  * Returns:
107141ec0267Sriastradh  * 0 for success or a negative error code on failure.
107241ec0267Sriastradh  */
amdgpu_bo_init(struct amdgpu_device * adev)1073efa246c0Sriastradh int amdgpu_bo_init(struct amdgpu_device *adev)
1074efa246c0Sriastradh {
107541ec0267Sriastradh 	/* reserve PAT memory space to WC for VRAM */
107641ec0267Sriastradh 	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
107741ec0267Sriastradh 				   adev->gmc.aper_size);
107841ec0267Sriastradh 
1079efa246c0Sriastradh 	/* Add an MTRR for the VRAM */
108041ec0267Sriastradh 	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
108141ec0267Sriastradh 					      adev->gmc.aper_size);
1082*4bc47a45Sriastradh #ifdef __NetBSD__
1083*4bc47a45Sriastradh 	if (adev->gmc.aper_base)
1084*4bc47a45Sriastradh 		pmap_pv_track(adev->gmc.aper_base, adev->gmc.aper_size);
1085*4bc47a45Sriastradh #endif
10860d50c49dSriastradh 	DRM_INFO("Detected VRAM RAM=%"PRIu64"M, BAR=%lluM\n",
108741ec0267Sriastradh 		 adev->gmc.mc_vram_size >> 20,
108841ec0267Sriastradh 		 (unsigned long long)adev->gmc.aper_size >> 20);
108941ec0267Sriastradh 	DRM_INFO("RAM width %dbits %s\n",
109041ec0267Sriastradh 		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
1091efa246c0Sriastradh 	return amdgpu_ttm_init(adev);
1092efa246c0Sriastradh }
1093efa246c0Sriastradh 
109441ec0267Sriastradh /**
109541ec0267Sriastradh  * amdgpu_bo_late_init - late init
109641ec0267Sriastradh  * @adev: amdgpu device object
109741ec0267Sriastradh  *
109841ec0267Sriastradh  * Calls amdgpu_ttm_late_init() to free resources used earlier during
109941ec0267Sriastradh  * initialization.
110041ec0267Sriastradh  *
110141ec0267Sriastradh  * Returns:
110241ec0267Sriastradh  * 0 for success or a negative error code on failure.
110341ec0267Sriastradh  */
amdgpu_bo_late_init(struct amdgpu_device * adev)110441ec0267Sriastradh int amdgpu_bo_late_init(struct amdgpu_device *adev)
110541ec0267Sriastradh {
110641ec0267Sriastradh 	amdgpu_ttm_late_init(adev);
110741ec0267Sriastradh 
110841ec0267Sriastradh 	return 0;
110941ec0267Sriastradh }
111041ec0267Sriastradh 
111141ec0267Sriastradh /**
111241ec0267Sriastradh  * amdgpu_bo_fini - tear down memory manager
111341ec0267Sriastradh  * @adev: amdgpu device object
111441ec0267Sriastradh  *
111541ec0267Sriastradh  * Reverses amdgpu_bo_init() to tear down memory manager.
111641ec0267Sriastradh  */
amdgpu_bo_fini(struct amdgpu_device * adev)1117efa246c0Sriastradh void amdgpu_bo_fini(struct amdgpu_device *adev)
1118efa246c0Sriastradh {
1119efa246c0Sriastradh 	amdgpu_ttm_fini(adev);
1120*4bc47a45Sriastradh #ifdef __NetBSD__
1121*4bc47a45Sriastradh 	if (adev->gmc.aper_base)
1122*4bc47a45Sriastradh 		pmap_pv_untrack(adev->gmc.aper_base, adev->gmc.aper_size);
1123*4bc47a45Sriastradh #endif
112441ec0267Sriastradh 	arch_phys_wc_del(adev->gmc.vram_mtrr);
112541ec0267Sriastradh 	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
1126efa246c0Sriastradh }
1127efa246c0Sriastradh 
11280d50c49dSriastradh #ifndef __NetBSD__		/* XXX unused? */
112941ec0267Sriastradh /**
113041ec0267Sriastradh  * amdgpu_bo_fbdev_mmap - mmap fbdev memory
113141ec0267Sriastradh  * @bo: &amdgpu_bo buffer object
113241ec0267Sriastradh  * @vma: vma as input from the fbdev mmap method
113341ec0267Sriastradh  *
113441ec0267Sriastradh  * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
113541ec0267Sriastradh  *
113641ec0267Sriastradh  * Returns:
113741ec0267Sriastradh  * 0 for success or a negative error code on failure.
113841ec0267Sriastradh  */
amdgpu_bo_fbdev_mmap(struct amdgpu_bo * bo,struct vm_area_struct * vma)1139efa246c0Sriastradh int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
1140efa246c0Sriastradh 			     struct vm_area_struct *vma)
1141efa246c0Sriastradh {
114241ec0267Sriastradh 	if (vma->vm_pgoff != 0)
114341ec0267Sriastradh 		return -EACCES;
114441ec0267Sriastradh 
114541ec0267Sriastradh 	return ttm_bo_mmap_obj(vma, &bo->tbo);
1146efa246c0Sriastradh }
11470d50c49dSriastradh #endif
1148efa246c0Sriastradh 
114941ec0267Sriastradh /**
115041ec0267Sriastradh  * amdgpu_bo_set_tiling_flags - set tiling flags
115141ec0267Sriastradh  * @bo: &amdgpu_bo buffer object
115241ec0267Sriastradh  * @tiling_flags: new flags
115341ec0267Sriastradh  *
115441ec0267Sriastradh  * Sets the buffer object's tiling flags to the new value. Used by the GEM
115541ec0267Sriastradh  * ioctl or kernel driver to set the tiling flags on a buffer.
115641ec0267Sriastradh  *
115741ec0267Sriastradh  * Returns:
115841ec0267Sriastradh  * 0 for success or a negative error code on failure.
115941ec0267Sriastradh  */
1160efa246c0Sriastradh int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1161efa246c0Sriastradh {
116241ec0267Sriastradh 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
116341ec0267Sriastradh 
116441ec0267Sriastradh 	if (adev->family <= AMDGPU_FAMILY_CZ &&
116541ec0267Sriastradh 	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
1166efa246c0Sriastradh 		return -EINVAL;
1167efa246c0Sriastradh 
1168efa246c0Sriastradh 	bo->tiling_flags = tiling_flags;
1169efa246c0Sriastradh 	return 0;
1170efa246c0Sriastradh }
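
/*
 * Minimal usage sketch (illustrative only): tiling flags are composed with
 * the AMDGPU_TILING_SET() macros from amdgpu_drm.h and applied while the
 * BO is reserved. `mode` here is a hypothetical hardware swizzle mode:
 *
 *	u64 flags = AMDGPU_TILING_SET(SWIZZLE_MODE, mode);
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r == 0) {
 *		r = amdgpu_bo_set_tiling_flags(bo, flags);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */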
1171efa246c0Sriastradh 
117241ec0267Sriastradh /**
117341ec0267Sriastradh  * amdgpu_bo_get_tiling_flags - get tiling flags
117441ec0267Sriastradh  * @bo: &amdgpu_bo buffer object
117541ec0267Sriastradh  * @tiling_flags: returned flags
117641ec0267Sriastradh  *
117741ec0267Sriastradh  * Gets the buffer object's tiling flags. Used by the GEM ioctl or kernel
117841ec0267Sriastradh  * driver to query the tiling flags of a buffer.
117941ec0267Sriastradh  */
1180efa246c0Sriastradh void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
1181efa246c0Sriastradh {
118241ec0267Sriastradh 	dma_resv_assert_held(bo->tbo.base.resv);
1183efa246c0Sriastradh 
1184efa246c0Sriastradh 	if (tiling_flags)
1185efa246c0Sriastradh 		*tiling_flags = bo->tiling_flags;
1186efa246c0Sriastradh }
1187efa246c0Sriastradh 
118841ec0267Sriastradh /**
118941ec0267Sriastradh  * amdgpu_bo_set_metadata - set metadata
119041ec0267Sriastradh  * @bo: &amdgpu_bo buffer object
119141ec0267Sriastradh  * @metadata: new metadata
119241ec0267Sriastradh  * @metadata_size: size of the new metadata
119341ec0267Sriastradh  * @flags: flags of the new metadata
119441ec0267Sriastradh  *
119541ec0267Sriastradh  * Sets buffer object's metadata, its size and flags.
119641ec0267Sriastradh  * Used via GEM ioctl.
119741ec0267Sriastradh  *
119841ec0267Sriastradh  * Returns:
119941ec0267Sriastradh  * 0 for success or a negative error code on failure.
120041ec0267Sriastradh  */
1201efa246c0Sriastradh int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
1202efa246c0Sriastradh 			    uint32_t metadata_size, uint64_t flags)
1203efa246c0Sriastradh {
1204efa246c0Sriastradh 	void *buffer;
1205efa246c0Sriastradh 
1206efa246c0Sriastradh 	if (!metadata_size) {
1207efa246c0Sriastradh 		if (bo->metadata_size) {
1208efa246c0Sriastradh 			kfree(bo->metadata);
1209efa246c0Sriastradh 			bo->metadata = NULL;
1210efa246c0Sriastradh 			bo->metadata_size = 0;
1211efa246c0Sriastradh 		}
1212efa246c0Sriastradh 		return 0;
1213efa246c0Sriastradh 	}
1214efa246c0Sriastradh 
1215efa246c0Sriastradh 	if (metadata == NULL)
1216efa246c0Sriastradh 		return -EINVAL;
1217efa246c0Sriastradh 
1218efa246c0Sriastradh 	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
1219efa246c0Sriastradh 	if (buffer == NULL)
1220efa246c0Sriastradh 		return -ENOMEM;
1221efa246c0Sriastradh 
1222efa246c0Sriastradh 	kfree(bo->metadata);
1223efa246c0Sriastradh 	bo->metadata_flags = flags;
1224efa246c0Sriastradh 	bo->metadata = buffer;
1225efa246c0Sriastradh 	bo->metadata_size = metadata_size;
1226efa246c0Sriastradh 
1227efa246c0Sriastradh 	return 0;
1228efa246c0Sriastradh }
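
/*
 * Note the clearing convention implemented above: a zero metadata_size
 * frees any previously attached metadata, so a caller can reset a BO
 * with (sketch):
 *
 *	amdgpu_bo_set_metadata(bo, NULL, 0, 0);
 */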
1229efa246c0Sriastradh 
123041ec0267Sriastradh /**
123141ec0267Sriastradh  * amdgpu_bo_get_metadata - get metadata
123241ec0267Sriastradh  * @bo: &amdgpu_bo buffer object
123341ec0267Sriastradh  * @buffer: returned metadata
123441ec0267Sriastradh  * @buffer_size: size of the buffer
123541ec0267Sriastradh  * @metadata_size: size of the returned metadata
123641ec0267Sriastradh  * @flags: flags of the returned metadata
123741ec0267Sriastradh  *
123841ec0267Sriastradh  * Gets buffer object's metadata, its size and flags. buffer_size shall not be
123941ec0267Sriastradh  * less than metadata_size.
124041ec0267Sriastradh  * Used via GEM ioctl.
124141ec0267Sriastradh  *
124241ec0267Sriastradh  * Returns:
124341ec0267Sriastradh  * 0 for success or a negative error code on failure.
124441ec0267Sriastradh  */
1245efa246c0Sriastradh int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
1246efa246c0Sriastradh 			   size_t buffer_size, uint32_t *metadata_size,
1247efa246c0Sriastradh 			   uint64_t *flags)
1248efa246c0Sriastradh {
1249efa246c0Sriastradh 	if (!buffer && !metadata_size)
1250efa246c0Sriastradh 		return -EINVAL;
1251efa246c0Sriastradh 
1252efa246c0Sriastradh 	if (buffer) {
1253efa246c0Sriastradh 		if (buffer_size < bo->metadata_size)
1254efa246c0Sriastradh 			return -EINVAL;
1255efa246c0Sriastradh 
1256efa246c0Sriastradh 		if (bo->metadata_size)
1257efa246c0Sriastradh 			memcpy(buffer, bo->metadata, bo->metadata_size);
1258efa246c0Sriastradh 	}
1259efa246c0Sriastradh 
1260efa246c0Sriastradh 	if (metadata_size)
1261efa246c0Sriastradh 		*metadata_size = bo->metadata_size;
1262efa246c0Sriastradh 	if (flags)
1263efa246c0Sriastradh 		*flags = bo->metadata_flags;
1264efa246c0Sriastradh 
1265efa246c0Sriastradh 	return 0;
1266efa246c0Sriastradh }
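
/*
 * The NULL-buffer checks above permit a two-call pattern when the caller
 * does not know the metadata size in advance: query the size first, then
 * fetch into a sufficiently large buffer. A minimal sketch, error
 * handling elided:
 *
 *	uint32_t size;
 *	uint64_t flags;
 *	void *buf;
 *
 *	r = amdgpu_bo_get_metadata(bo, NULL, 0, &size, NULL);
 *	buf = kzalloc(size, GFP_KERNEL);
 *	r = amdgpu_bo_get_metadata(bo, buf, size, NULL, &flags);
 */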
1267efa246c0Sriastradh 
126841ec0267Sriastradh /**
126941ec0267Sriastradh  * amdgpu_bo_move_notify - notification about a memory move
127041ec0267Sriastradh  * @bo: pointer to a buffer object
127141ec0267Sriastradh  * @evict: if this move is evicting the buffer from the graphics address space
127241ec0267Sriastradh  * @new_mem: new information of the buffer object
127341ec0267Sriastradh  *
127441ec0267Sriastradh  * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
127541ec0267Sriastradh  * bookkeeping.
127641ec0267Sriastradh  * TTM driver callback which is called when ttm moves a buffer.
127741ec0267Sriastradh  */
1278efa246c0Sriastradh void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
127941ec0267Sriastradh 			   bool evict,
1280efa246c0Sriastradh 			   struct ttm_mem_reg *new_mem)
1281efa246c0Sriastradh {
128241ec0267Sriastradh 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
128341ec0267Sriastradh 	struct amdgpu_bo *abo;
128441ec0267Sriastradh 	struct ttm_mem_reg *old_mem = &bo->mem;
1285efa246c0Sriastradh 
128641ec0267Sriastradh 	if (!amdgpu_bo_is_amdgpu_bo(bo))
1287efa246c0Sriastradh 		return;
1288efa246c0Sriastradh 
128941ec0267Sriastradh 	abo = ttm_to_amdgpu_bo(bo);
129041ec0267Sriastradh 	amdgpu_vm_bo_invalidate(adev, abo, evict);
129141ec0267Sriastradh 
129241ec0267Sriastradh 	amdgpu_bo_kunmap(abo);
129341ec0267Sriastradh 
129441ec0267Sriastradh 	/* remember the eviction */
129541ec0267Sriastradh 	if (evict)
129641ec0267Sriastradh 		atomic64_inc(&adev->num_evictions);
1297efa246c0Sriastradh 
1298efa246c0Sriastradh 	/* update statistics */
1299efa246c0Sriastradh 	if (!new_mem)
1300efa246c0Sriastradh 		return;
1301efa246c0Sriastradh 
1302efa246c0Sriastradh 	/* move_notify is called before move happens */
130341ec0267Sriastradh 	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
1304efa246c0Sriastradh }
1305efa246c0Sriastradh 
130641ec0267Sriastradh /**
130741ec0267Sriastradh  * amdgpu_bo_release_notify - notification about a BO being released
130841ec0267Sriastradh  * @bo: pointer to a buffer object
130941ec0267Sriastradh  *
131041ec0267Sriastradh  * Wipes VRAM buffers whose contents should not be leaked before the
131141ec0267Sriastradh  * memory is released.
131241ec0267Sriastradh  */
131341ec0267Sriastradh void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
131441ec0267Sriastradh {
131541ec0267Sriastradh 	struct dma_fence *fence = NULL;
131641ec0267Sriastradh 	struct amdgpu_bo *abo;
131741ec0267Sriastradh 	int r;
131841ec0267Sriastradh 
131941ec0267Sriastradh 	if (!amdgpu_bo_is_amdgpu_bo(bo))
132041ec0267Sriastradh 		return;
132141ec0267Sriastradh 
132241ec0267Sriastradh 	abo = ttm_to_amdgpu_bo(bo);
132341ec0267Sriastradh 
132441ec0267Sriastradh 	if (abo->kfd_bo)
132541ec0267Sriastradh 		amdgpu_amdkfd_unreserve_memory_limit(abo);
132641ec0267Sriastradh 
132741ec0267Sriastradh 	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
132841ec0267Sriastradh 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
132941ec0267Sriastradh 		return;
133041ec0267Sriastradh 
133141ec0267Sriastradh 	dma_resv_lock(bo->base.resv, NULL);
133241ec0267Sriastradh 
133341ec0267Sriastradh 	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
133441ec0267Sriastradh 	if (!WARN_ON(r)) {
133541ec0267Sriastradh 		amdgpu_bo_fence(abo, fence, false);
133641ec0267Sriastradh 		dma_fence_put(fence);
133741ec0267Sriastradh 	}
133841ec0267Sriastradh 
133941ec0267Sriastradh 	dma_resv_unlock(bo->base.resv);
134041ec0267Sriastradh }
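
/*
 * The wipe is opt-in per BO: it only happens for buffers created with
 * AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE in their flags. A creation
 * sketch (fields abbreviated, assuming the amdgpu_bo_param interface):
 *
 *	struct amdgpu_bo_param bp = {
 *		.size	= size,
 *		.domain	= AMDGPU_GEM_DOMAIN_VRAM,
 *		.flags	= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE,
 *		.type	= ttm_bo_type_kernel,
 *	};
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */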
134141ec0267Sriastradh 
134241ec0267Sriastradh /**
134341ec0267Sriastradh  * amdgpu_bo_fault_reserve_notify - notification about a memory fault
134441ec0267Sriastradh  * @bo: pointer to a buffer object
134541ec0267Sriastradh  *
134641ec0267Sriastradh  * Notifies the driver that we are taking a fault on this BO and have
134741ec0267Sriastradh  * reserved it; also performs bookkeeping.
134841ec0267Sriastradh  * TTM driver callback for dealing with vm faults.
134941ec0267Sriastradh  *
135041ec0267Sriastradh  * Returns:
135141ec0267Sriastradh  * 0 for success or a negative error code on failure.
135241ec0267Sriastradh  */
1353efa246c0Sriastradh int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1354efa246c0Sriastradh {
135541ec0267Sriastradh 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
135641ec0267Sriastradh 	struct ttm_operation_ctx ctx = { false, false };
1357efa246c0Sriastradh 	struct amdgpu_bo *abo;
135841ec0267Sriastradh 	unsigned long offset, size;
135941ec0267Sriastradh 	int r;
1360efa246c0Sriastradh 
136141ec0267Sriastradh 	if (!amdgpu_bo_is_amdgpu_bo(bo))
1362efa246c0Sriastradh 		return 0;
1363efa246c0Sriastradh 
136441ec0267Sriastradh 	abo = ttm_to_amdgpu_bo(bo);
136541ec0267Sriastradh 
136641ec0267Sriastradh 	/* Remember that this BO was accessed by the CPU */
136741ec0267Sriastradh 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
136841ec0267Sriastradh 
1369efa246c0Sriastradh 	if (bo->mem.mem_type != TTM_PL_VRAM)
1370efa246c0Sriastradh 		return 0;
1371efa246c0Sriastradh 
1372efa246c0Sriastradh 	size = bo->mem.num_pages << PAGE_SHIFT;
1373efa246c0Sriastradh 	offset = bo->mem.start << PAGE_SHIFT;
137441ec0267Sriastradh 	if ((offset + size) <= adev->gmc.visible_vram_size)
1375efa246c0Sriastradh 		return 0;
1376efa246c0Sriastradh 
137741ec0267Sriastradh 	/* Can't move a pinned BO to visible VRAM */
137841ec0267Sriastradh 	if (abo->pin_count > 0)
137941ec0267Sriastradh 		return -EINVAL;
138041ec0267Sriastradh 
1381efa246c0Sriastradh 	/* hurrah, the memory is not visible! */
138241ec0267Sriastradh 	atomic64_inc(&adev->num_vram_cpu_page_faults);
138341ec0267Sriastradh 	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
138441ec0267Sriastradh 					AMDGPU_GEM_DOMAIN_GTT);
138541ec0267Sriastradh 
138641ec0267Sriastradh 	/* Avoid costly evictions; only set GTT as a busy placement */
138741ec0267Sriastradh 	abo->placement.num_busy_placement = 1;
138841ec0267Sriastradh 	abo->placement.busy_placement = &abo->placements[1];
138941ec0267Sriastradh 
139041ec0267Sriastradh 	r = ttm_bo_validate(bo, &abo->placement, &ctx);
139141ec0267Sriastradh 	if (unlikely(r != 0))
1392efa246c0Sriastradh 		return r;
1393efa246c0Sriastradh 
1394efa246c0Sriastradh 	offset = bo->mem.start << PAGE_SHIFT;
1395efa246c0Sriastradh 	/* this should never happen */
139641ec0267Sriastradh 	if (bo->mem.mem_type == TTM_PL_VRAM &&
139741ec0267Sriastradh 	    (offset + size) > adev->gmc.visible_vram_size)
1398efa246c0Sriastradh 		return -EINVAL;
1399efa246c0Sriastradh 
1400efa246c0Sriastradh 	return 0;
1401efa246c0Sriastradh }
1402efa246c0Sriastradh 
1403efa246c0Sriastradh /**
1404efa246c0Sriastradh  * amdgpu_bo_fence - add fence to buffer object
1405efa246c0Sriastradh  *
1406efa246c0Sriastradh  * @bo: buffer object in question
1407efa246c0Sriastradh  * @fence: fence to add
1408efa246c0Sriastradh  * @shared: true if fence should be added shared
1409efa246c0Sriastradh  *
1410efa246c0Sriastradh  */
141141ec0267Sriastradh void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
1412efa246c0Sriastradh 		     bool shared)
1413efa246c0Sriastradh {
141441ec0267Sriastradh 	struct dma_resv *resv = bo->tbo.base.resv;
1415efa246c0Sriastradh 
1416efa246c0Sriastradh 	if (shared)
141741ec0267Sriastradh 		dma_resv_add_shared_fence(resv, fence);
1418efa246c0Sriastradh 	else
141941ec0267Sriastradh 		dma_resv_add_excl_fence(resv, fence);
142041ec0267Sriastradh }
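
/*
 * Usage sketch: after submitting GPU work that only reads `bo`, attach the
 * resulting fence as shared so concurrent readers are not serialized; a
 * writer would pass shared = false to make it the exclusive fence. Here
 * `fence` is assumed to come from the submission:
 *
 *	amdgpu_bo_fence(bo, fence, true);
 */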
142141ec0267Sriastradh 
142241ec0267Sriastradh /**
142341ec0267Sriastradh  * amdgpu_bo_sync_wait - Wait for BO reservation fences
142441ec0267Sriastradh  *
142541ec0267Sriastradh  * @bo: buffer object
142641ec0267Sriastradh  * @owner: fence owner
142741ec0267Sriastradh  * @intr: Whether the wait is interruptible
142841ec0267Sriastradh  *
142941ec0267Sriastradh  * Returns:
143041ec0267Sriastradh  * 0 on success, errno otherwise.
143141ec0267Sriastradh  */
143241ec0267Sriastradh int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
143341ec0267Sriastradh {
143441ec0267Sriastradh 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
143541ec0267Sriastradh 	struct amdgpu_sync sync;
143641ec0267Sriastradh 	int r;
143741ec0267Sriastradh 
143841ec0267Sriastradh 	amdgpu_sync_create(&sync);
143941ec0267Sriastradh 	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
144041ec0267Sriastradh 	r = amdgpu_sync_wait(&sync, intr);
144141ec0267Sriastradh 	amdgpu_sync_free(&sync);
144241ec0267Sriastradh 
144341ec0267Sriastradh 	return r;
144441ec0267Sriastradh }
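
/*
 * Usage sketch: wait for every previous user of `bo` before touching it
 * from the CPU; AMDGPU_FENCE_OWNER_UNDEFINED means "sync to all fences":
 *
 *	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_UNDEFINED, true);
 *	if (r)
 *		return r;	(e.g. -ERESTARTSYS if interrupted)
 */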
144541ec0267Sriastradh 
144641ec0267Sriastradh /**
144741ec0267Sriastradh  * amdgpu_bo_gpu_offset - return GPU offset of bo
144841ec0267Sriastradh  * @bo:	amdgpu object for which we query the offset
144941ec0267Sriastradh  *
145041ec0267Sriastradh  * Note: object should either be pinned or reserved when calling this
145141ec0267Sriastradh  * function; it might be useful to add a check for this for debugging.
145241ec0267Sriastradh  *
145341ec0267Sriastradh  * Returns:
145441ec0267Sriastradh  * current GPU offset of the object.
145541ec0267Sriastradh  */
145641ec0267Sriastradh u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
145741ec0267Sriastradh {
145841ec0267Sriastradh 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
145941ec0267Sriastradh 	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
146041ec0267Sriastradh 		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
146141ec0267Sriastradh 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
146241ec0267Sriastradh 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
146341ec0267Sriastradh 		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
146441ec0267Sriastradh 
146541ec0267Sriastradh 	return amdgpu_gmc_sign_extend(bo->tbo.offset);
146641ec0267Sriastradh }
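
/*
 * Usage sketch for the pin-then-query pattern the note above describes,
 * assuming a kernel-owned BO destined for VRAM:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r == 0) {
 *		r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *		if (r == 0)
 *			gpu_addr = amdgpu_bo_gpu_offset(bo);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */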
146741ec0267Sriastradh 
146841ec0267Sriastradh /**
146941ec0267Sriastradh  * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
147041ec0267Sriastradh  * @adev: amdgpu device object
147141ec0267Sriastradh  * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
147241ec0267Sriastradh  *
147341ec0267Sriastradh  * Returns:
147441ec0267Sriastradh  * Which of the allowed domains is preferred for pinning the BO for scanout.
147541ec0267Sriastradh  */
147641ec0267Sriastradh uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
147741ec0267Sriastradh 					    uint32_t domain)
147841ec0267Sriastradh {
147941ec0267Sriastradh 	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
148041ec0267Sriastradh 		domain = AMDGPU_GEM_DOMAIN_VRAM;
148141ec0267Sriastradh 		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
148241ec0267Sriastradh 			domain = AMDGPU_GEM_DOMAIN_GTT;
148341ec0267Sriastradh 	}
148441ec0267Sriastradh 	return domain;
1485efa246c0Sriastradh }
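
/*
 * Usage sketch: when pinning a framebuffer for scanout, narrow the
 * VRAM|GTT combination to the preferred domain first; small-VRAM parts
 * that scan out of system memory then fall back to GTT:
 *
 *	domain = amdgpu_bo_get_preferred_pin_domain(adev,
 *			AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
 *	r = amdgpu_bo_pin(bo, domain);
 */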
1486