/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/* undo vm namespace pollution */
#undef min_offset
#undef max_offset

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
 * interfaces to create/destroy/set up buffer objects which are then managed
 * by the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */
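
/*
 * Illustrative sketch (not part of the original source): userspace BOs are
 * normally created through the GEM ioctls, but in-kernel users fill an
 * &amdgpu_bo_param and call amdgpu_bo_create() directly, e.g. for a GTT
 * allocation ("size" is a hypothetical caller-provided value):
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */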

static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	if (amdgpu_gpu_recovery == 0 ||
	    (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
		return false;

	return true;
}

/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 *
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_system_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		if (flags & AMDGPU_GEM_CREATE_SHADOW)
			places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		else
			places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
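
/*
 * Illustrative sketch (not part of the original source): requesting a
 * combined domain yields one placement entry per domain bit, filled in the
 * order of the checks above. For example:
 *
 *	amdgpu_bo_placement_from_domain(abo,
 *					AMDGPU_GEM_DOMAIN_VRAM |
 *					AMDGPU_GEM_DOMAIN_GTT);
 *
 * produces places[0] for VRAM and places[1] for GTT, so TTM first tries to
 * satisfy the allocation from VRAM and falls back to GTT if that fails.
 */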

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
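
/*
 * Illustrative usage sketch (not part of the original source): a typical
 * caller allocates a page of pinned, CPU-mapped VRAM and later releases it
 * with amdgpu_bo_free_kernel() below. "my_bo", "my_gpu_addr" and
 * "my_cpu_ptr" are hypothetical names:
 *
 *	struct amdgpu_bo *my_bo = NULL;
 *	u64 my_gpu_addr;
 *	void *my_cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &my_bo,
 *				    &my_gpu_addr, &my_cpu_ptr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&my_bo, &my_gpu_addr, &my_cpu_ptr);
 */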

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the BO size is not bigger than the total memory of the
 * requested domain
 */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}


	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %lu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = bp->type != ttm_bo_type_kernel ?
			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
		if (!bp->resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));

		if (!bp->resv)
			reservation_object_unlock((*bo_ptr)->tbo.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
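
/*
 * Illustrative sketch (not part of the original source): callers request a
 * shadow by setting AMDGPU_GEM_CREATE_SHADOW in the creation flags; the
 * shadow is only actually created when amdgpu_bo_need_backup() returns true
 * (a dGPU with GPU recovery enabled):
 *
 *	bp.flags |= AMDGPU_GEM_CREATE_SHADOW;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */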

/**
 * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be backed up
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies an &amdgpu_bo buffer object to its shadow object.
 * Not used for now.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)

{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow bos. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be restored
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)

{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
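
/*
 * Illustrative usage sketch (not part of the original source): map a BO,
 * access it from the CPU, then drop the mapping; "data" is a hypothetical
 * name:
 *
 *	void *data;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &data);
 *	if (r)
 *		return r;
 *	memset(data, 0, amdgpu_bo_size(bo));
 *	amdgpu_bo_kunmap(bo);
 */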

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
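
/*
 * Illustrative usage sketch (not part of the original source): pinning
 * requires the BO to be reserved, and every successful pin must eventually
 * be paired with an unpin (see amdgpu_bo_unpin() below):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	amdgpu_bo_unreserve(bo);
 */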
977b843c749SSergey Zigachev
978b843c749SSergey Zigachev /**
979b843c749SSergey Zigachev * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
980b843c749SSergey Zigachev * @bo: &amdgpu_bo buffer object to be unpinned
981b843c749SSergey Zigachev *
982b843c749SSergey Zigachev * Decreases the pin_count, and clears the flags if pin_count reaches 0.
983b843c749SSergey Zigachev * Changes placement and pin size accordingly.
984b843c749SSergey Zigachev *
985b843c749SSergey Zigachev * Returns:
986b843c749SSergey Zigachev * 0 for success or a negative error code on failure.
987b843c749SSergey Zigachev */
amdgpu_bo_unpin(struct amdgpu_bo * bo)988b843c749SSergey Zigachev int amdgpu_bo_unpin(struct amdgpu_bo *bo)
989b843c749SSergey Zigachev {
990b843c749SSergey Zigachev struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
991b843c749SSergey Zigachev struct ttm_operation_ctx ctx = { false, false };
992b843c749SSergey Zigachev int r, i;
993b843c749SSergey Zigachev
994b843c749SSergey Zigachev if (!bo->pin_count) {
995b843c749SSergey Zigachev dev_warn(adev->dev, "%p unpin not necessary\n", bo);
996b843c749SSergey Zigachev return 0;
997b843c749SSergey Zigachev }
998b843c749SSergey Zigachev bo->pin_count--;
999b843c749SSergey Zigachev if (bo->pin_count)
1000b843c749SSergey Zigachev return 0;
1001b843c749SSergey Zigachev
1002b843c749SSergey Zigachev amdgpu_bo_subtract_pin_size(bo);
1003b843c749SSergey Zigachev
1004b843c749SSergey Zigachev for (i = 0; i < bo->placement.num_placement; i++) {
1005b843c749SSergey Zigachev bo->placements[i].lpfn = 0;
1006b843c749SSergey Zigachev bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
1007b843c749SSergey Zigachev }
1008b843c749SSergey Zigachev r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1009b843c749SSergey Zigachev if (unlikely(r))
1010b843c749SSergey Zigachev dev_err(adev->dev, "%p validate failed for unpin\n", bo);
1011b843c749SSergey Zigachev
1012b843c749SSergey Zigachev return r;
1013b843c749SSergey Zigachev }
1014b843c749SSergey Zigachev
1015b843c749SSergey Zigachev /**
1016b843c749SSergey Zigachev * amdgpu_bo_evict_vram - evict VRAM buffers
1017b843c749SSergey Zigachev * @adev: amdgpu device object
1018b843c749SSergey Zigachev *
1019b843c749SSergey Zigachev * Evicts all VRAM buffers on the lru list of the memory type.
1020b843c749SSergey Zigachev * Mainly used for evicting vram at suspend time.
1021b843c749SSergey Zigachev *
1022b843c749SSergey Zigachev * Returns:
1023b843c749SSergey Zigachev * 0 for success or a negative error code on failure.
1024b843c749SSergey Zigachev */
amdgpu_bo_evict_vram(struct amdgpu_device * adev)1025b843c749SSergey Zigachev int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
1026b843c749SSergey Zigachev {
1027b843c749SSergey Zigachev /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
1028b843c749SSergey Zigachev if (0 && (adev->flags & AMD_IS_APU)) {
1029b843c749SSergey Zigachev /* Useless to evict on IGP chips */
1030b843c749SSergey Zigachev return 0;
1031b843c749SSergey Zigachev }
1032b843c749SSergey Zigachev return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
1033b843c749SSergey Zigachev }
1034b843c749SSergey Zigachev
1035b843c749SSergey Zigachev static const char *amdgpu_vram_names[] = {
1036b843c749SSergey Zigachev "UNKNOWN",
1037b843c749SSergey Zigachev "GDDR1",
1038b843c749SSergey Zigachev "DDR2",
1039b843c749SSergey Zigachev "GDDR3",
1040b843c749SSergey Zigachev "GDDR4",
1041b843c749SSergey Zigachev "GDDR5",
1042b843c749SSergey Zigachev "HBM",
1043b843c749SSergey Zigachev "DDR3",
1044b843c749SSergey Zigachev "DDR4",
1045b843c749SSergey Zigachev };
1046b843c749SSergey Zigachev
1047b843c749SSergey Zigachev /**
1048b843c749SSergey Zigachev * amdgpu_bo_init - initialize memory manager
1049b843c749SSergey Zigachev * @adev: amdgpu device object
1050b843c749SSergey Zigachev *
1051b843c749SSergey Zigachev * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
1052b843c749SSergey Zigachev *
1053b843c749SSergey Zigachev * Returns:
1054b843c749SSergey Zigachev * 0 for success or a negative error code on failure.
1055b843c749SSergey Zigachev */
1056b843c749SSergey Zigachev int amdgpu_bo_init(struct amdgpu_device *adev)
1057b843c749SSergey Zigachev {
1058b843c749SSergey Zigachev /* reserve PAT memory space to WC for VRAM */
1059b843c749SSergey Zigachev arch_io_reserve_memtype_wc(adev->gmc.aper_base,
1060b843c749SSergey Zigachev adev->gmc.aper_size);
1061b843c749SSergey Zigachev
1062b843c749SSergey Zigachev /* Add an MTRR for the VRAM */
1063b843c749SSergey Zigachev adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
1064b843c749SSergey Zigachev adev->gmc.aper_size);
1065b843c749SSergey Zigachev DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
1066b843c749SSergey Zigachev adev->gmc.mc_vram_size >> 20,
1067b843c749SSergey Zigachev (unsigned long long)adev->gmc.aper_size >> 20);
1068b843c749SSergey Zigachev DRM_INFO("RAM width %dbits %s\n",
1069b843c749SSergey Zigachev adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
1070b843c749SSergey Zigachev return amdgpu_ttm_init(adev);
1071b843c749SSergey Zigachev }
1072b843c749SSergey Zigachev
1073b843c749SSergey Zigachev /**
1074b843c749SSergey Zigachev * amdgpu_bo_late_init - late init
1075b843c749SSergey Zigachev * @adev: amdgpu device object
1076b843c749SSergey Zigachev *
1077b843c749SSergey Zigachev * Calls amdgpu_ttm_late_init() to free resources used earlier during
1078b843c749SSergey Zigachev * initialization.
1079b843c749SSergey Zigachev *
1080b843c749SSergey Zigachev * Returns:
1081b843c749SSergey Zigachev * 0 for success or a negative error code on failure.
1082b843c749SSergey Zigachev */
1083b843c749SSergey Zigachev int amdgpu_bo_late_init(struct amdgpu_device *adev)
1084b843c749SSergey Zigachev {
1085b843c749SSergey Zigachev amdgpu_ttm_late_init(adev);
1086b843c749SSergey Zigachev
1087b843c749SSergey Zigachev return 0;
1088b843c749SSergey Zigachev }
1089b843c749SSergey Zigachev
1090b843c749SSergey Zigachev /**
1091b843c749SSergey Zigachev * amdgpu_bo_fini - tear down memory manager
1092b843c749SSergey Zigachev * @adev: amdgpu device object
1093b843c749SSergey Zigachev *
1094b843c749SSergey Zigachev * Reverses amdgpu_bo_init() to tear down memory manager.
1095b843c749SSergey Zigachev */
1096b843c749SSergey Zigachev void amdgpu_bo_fini(struct amdgpu_device *adev)
1097b843c749SSergey Zigachev {
1098b843c749SSergey Zigachev amdgpu_ttm_fini(adev);
1099b843c749SSergey Zigachev arch_phys_wc_del(adev->gmc.vram_mtrr);
1100b843c749SSergey Zigachev arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
1101b843c749SSergey Zigachev }
1102b843c749SSergey Zigachev
1103b843c749SSergey Zigachev /**
1104b843c749SSergey Zigachev * amdgpu_bo_fbdev_mmap - mmap fbdev memory
1105b843c749SSergey Zigachev * @bo: &amdgpu_bo buffer object
1106b843c749SSergey Zigachev * @vma: vma as input from the fbdev mmap method
1107b843c749SSergey Zigachev *
1108b843c749SSergey Zigachev * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
1109b843c749SSergey Zigachev *
1110b843c749SSergey Zigachev * Returns:
1111b843c749SSergey Zigachev * 0 for success or a negative error code on failure.
1112b843c749SSergey Zigachev */
1113b843c749SSergey Zigachev int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
1114b843c749SSergey Zigachev struct vm_area_struct *vma)
1115b843c749SSergey Zigachev {
1116b843c749SSergey Zigachev return ttm_fbdev_mmap(vma, &bo->tbo);
1117b843c749SSergey Zigachev }
1118b843c749SSergey Zigachev
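/*
 * Illustrative sketch (hypothetical fb_ops hook): the fbdev emulation would
 * route its mmap through this helper. Stashing the BO in info->par is an
 * assumption made for the example, not how the driver lays out its data.
 */
#if 0
static int example_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = info->par;	/* assumed: BO kept in ->par */

	return amdgpu_bo_fbdev_mmap(bo, vma);
}
#endif
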
1119b843c749SSergey Zigachev /**
1120b843c749SSergey Zigachev * amdgpu_bo_set_tiling_flags - set tiling flags
1121b843c749SSergey Zigachev * @bo: &amdgpu_bo buffer object
1122b843c749SSergey Zigachev * @tiling_flags: new flags
1123b843c749SSergey Zigachev *
1124b843c749SSergey Zigachev * Sets the buffer object's tiling flags to the new value. Used by GEM ioctl or
1125b843c749SSergey Zigachev * kernel driver to set the tiling flags on a buffer.
1126b843c749SSergey Zigachev *
1127b843c749SSergey Zigachev * Returns:
1128b843c749SSergey Zigachev * 0 for success or a negative error code on failure.
1129b843c749SSergey Zigachev */
1130b843c749SSergey Zigachev int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1131b843c749SSergey Zigachev {
1132b843c749SSergey Zigachev struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1133b843c749SSergey Zigachev
1134b843c749SSergey Zigachev if (adev->family <= AMDGPU_FAMILY_CZ &&
1135b843c749SSergey Zigachev AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
1136b843c749SSergey Zigachev return -EINVAL;
1137b843c749SSergey Zigachev
1138b843c749SSergey Zigachev bo->tiling_flags = tiling_flags;
1139b843c749SSergey Zigachev return 0;
1140b843c749SSergey Zigachev }
1141b843c749SSergey Zigachev
1142b843c749SSergey Zigachev /**
1143b843c749SSergey Zigachev * amdgpu_bo_get_tiling_flags - get tiling flags
1144b843c749SSergey Zigachev * @bo: &amdgpu_bo buffer object
1145b843c749SSergey Zigachev * @tiling_flags: returned flags
1146b843c749SSergey Zigachev *
1147b843c749SSergey Zigachev * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
1148b843c749SSergey Zigachev * get the tiling flags of a buffer.
1149b843c749SSergey Zigachev */
1150b843c749SSergey Zigachev void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
1151b843c749SSergey Zigachev {
1152b843c749SSergey Zigachev lockdep_assert_held(&bo->tbo.resv->lock.base);
1153b843c749SSergey Zigachev
1154b843c749SSergey Zigachev if (tiling_flags)
1155b843c749SSergey Zigachev *tiling_flags = bo->tiling_flags;
1156b843c749SSergey Zigachev }
1157b843c749SSergey Zigachev
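/*
 * Illustrative sketch: both tiling helpers are meant to run with the BO's
 * reservation held (the getter asserts this via lockdep above), so a caller
 * brackets them with reserve/unreserve.
 */
#if 0
static int example_set_tiling(struct amdgpu_bo *bo, u64 tiling_flags)
{
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	r = amdgpu_bo_set_tiling_flags(bo, tiling_flags);
	amdgpu_bo_unreserve(bo);
	return r;
}
#endif
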
1158b843c749SSergey Zigachev /**
1159b843c749SSergey Zigachev * amdgpu_bo_set_metadata - set metadata
1160b843c749SSergey Zigachev * @bo: &amdgpu_bo buffer object
1161b843c749SSergey Zigachev * @metadata: new metadata
1162b843c749SSergey Zigachev * @metadata_size: size of the new metadata
1163b843c749SSergey Zigachev * @flags: flags of the new metadata
1164b843c749SSergey Zigachev *
1165b843c749SSergey Zigachev * Sets buffer object's metadata, its size and flags.
1166b843c749SSergey Zigachev * Used via GEM ioctl.
1167b843c749SSergey Zigachev *
1168b843c749SSergey Zigachev * Returns:
1169b843c749SSergey Zigachev * 0 for success or a negative error code on failure.
1170b843c749SSergey Zigachev */
1171b843c749SSergey Zigachev int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
1172b843c749SSergey Zigachev uint32_t metadata_size, uint64_t flags)
1173b843c749SSergey Zigachev {
1174b843c749SSergey Zigachev void *buffer;
1175b843c749SSergey Zigachev
1176b843c749SSergey Zigachev if (!metadata_size) {
1177b843c749SSergey Zigachev if (bo->metadata_size) {
1178b843c749SSergey Zigachev kfree(bo->metadata);
1179b843c749SSergey Zigachev bo->metadata = NULL;
1180b843c749SSergey Zigachev bo->metadata_size = 0;
1181b843c749SSergey Zigachev }
1182b843c749SSergey Zigachev return 0;
1183b843c749SSergey Zigachev }
1184b843c749SSergey Zigachev
1185b843c749SSergey Zigachev if (metadata == NULL)
1186b843c749SSergey Zigachev return -EINVAL;
1187b843c749SSergey Zigachev
1188b843c749SSergey Zigachev buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
1189b843c749SSergey Zigachev if (buffer == NULL)
1190b843c749SSergey Zigachev return -ENOMEM;
1191b843c749SSergey Zigachev
1192b843c749SSergey Zigachev kfree(bo->metadata);
1193b843c749SSergey Zigachev bo->metadata_flags = flags;
1194b843c749SSergey Zigachev bo->metadata = buffer;
1195b843c749SSergey Zigachev bo->metadata_size = metadata_size;
1196b843c749SSergey Zigachev
1197b843c749SSergey Zigachev return 0;
1198b843c749SSergey Zigachev }
1199b843c749SSergey Zigachev
1200b843c749SSergey Zigachev /**
1201b843c749SSergey Zigachev * amdgpu_bo_get_metadata - get metadata
1202b843c749SSergey Zigachev * @bo: &amdgpu_bo buffer object
1203b843c749SSergey Zigachev * @buffer: returned metadata
1204b843c749SSergey Zigachev * @buffer_size: size of the buffer
1205b843c749SSergey Zigachev * @metadata_size: size of the returned metadata
1206b843c749SSergey Zigachev * @flags: flags of the returned metadata
1207b843c749SSergey Zigachev *
1208b843c749SSergey Zigachev * Gets buffer object's metadata, its size and flags. buffer_size shall not be
1209b843c749SSergey Zigachev * less than metadata_size.
1210b843c749SSergey Zigachev * Used via GEM ioctl.
1211b843c749SSergey Zigachev *
1212b843c749SSergey Zigachev * Returns:
1213b843c749SSergey Zigachev * 0 for success or a negative error code on failure.
1214b843c749SSergey Zigachev */
1215b843c749SSergey Zigachev int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
1216b843c749SSergey Zigachev size_t buffer_size, uint32_t *metadata_size,
1217b843c749SSergey Zigachev uint64_t *flags)
1218b843c749SSergey Zigachev {
1219b843c749SSergey Zigachev if (!buffer && !metadata_size)
1220b843c749SSergey Zigachev return -EINVAL;
1221b843c749SSergey Zigachev
1222b843c749SSergey Zigachev if (buffer) {
1223b843c749SSergey Zigachev if (buffer_size < bo->metadata_size)
1224b843c749SSergey Zigachev return -EINVAL;
1225b843c749SSergey Zigachev
1226b843c749SSergey Zigachev if (bo->metadata_size)
1227b843c749SSergey Zigachev memcpy(buffer, bo->metadata, bo->metadata_size);
1228b843c749SSergey Zigachev }
1229b843c749SSergey Zigachev
1230b843c749SSergey Zigachev if (metadata_size)
1231b843c749SSergey Zigachev *metadata_size = bo->metadata_size;
1232b843c749SSergey Zigachev if (flags)
1233b843c749SSergey Zigachev *flags = bo->metadata_flags;
1234b843c749SSergey Zigachev
1235b843c749SSergey Zigachev return 0;
1236b843c749SSergey Zigachev }
1237b843c749SSergey Zigachev
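/*
 * Illustrative sketch of the two-step query pattern the interface allows:
 * first ask only for the size, then fetch the data into a buffer of at
 * least metadata_size bytes. Hypothetical caller, error paths trimmed.
 */
#if 0
static int example_query_metadata(struct amdgpu_bo *bo)
{
	uint32_t size = 0;
	uint64_t flags;
	void *buf;
	int r;

	/* buffer == NULL is legal as long as metadata_size is supplied */
	r = amdgpu_bo_get_metadata(bo, NULL, 0, &size, &flags);
	if (r || !size)
		return r;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* buffer_size must not be less than the BO's metadata_size */
	r = amdgpu_bo_get_metadata(bo, buf, size, &size, &flags);
	kfree(buf);
	return r;
}
#endif
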
1238b843c749SSergey Zigachev /**
1239b843c749SSergey Zigachev * amdgpu_bo_move_notify - notification about a memory move
1240b843c749SSergey Zigachev * @bo: pointer to a buffer object
1241b843c749SSergey Zigachev * @evict: if this move is evicting the buffer from the graphics address space
1242b843c749SSergey Zigachev * @new_mem: new information of the buffer object
1243b843c749SSergey Zigachev *
1244b843c749SSergey Zigachev * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
1245b843c749SSergey Zigachev * bookkeeping.
1246b843c749SSergey Zigachev * TTM driver callback which is called when ttm moves a buffer.
1247b843c749SSergey Zigachev */
1248b843c749SSergey Zigachev void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
1249b843c749SSergey Zigachev bool evict,
1250b843c749SSergey Zigachev struct ttm_mem_reg *new_mem)
1251b843c749SSergey Zigachev {
1252b843c749SSergey Zigachev struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1253b843c749SSergey Zigachev struct amdgpu_bo *abo;
1255b843c749SSergey Zigachev struct ttm_mem_reg *old_mem = &bo->mem;
1257b843c749SSergey Zigachev
1258b843c749SSergey Zigachev if (!amdgpu_bo_is_amdgpu_bo(bo))
1259b843c749SSergey Zigachev return;
1260b843c749SSergey Zigachev
1261b843c749SSergey Zigachev abo = ttm_to_amdgpu_bo(bo);
1262b843c749SSergey Zigachev amdgpu_vm_bo_invalidate(adev, abo, evict);
1263b843c749SSergey Zigachev
1264b843c749SSergey Zigachev amdgpu_bo_kunmap(abo);
1265b843c749SSergey Zigachev
1266b843c749SSergey Zigachev /* remember the eviction */
1267b843c749SSergey Zigachev if (evict)
1268b843c749SSergey Zigachev atomic64_inc(&adev->num_evictions);
1269b843c749SSergey Zigachev
1270b843c749SSergey Zigachev /* update statistics */
1271b843c749SSergey Zigachev if (!new_mem)
1272b843c749SSergey Zigachev return;
1273b843c749SSergey Zigachev
1274b843c749SSergey Zigachev /* move_notify is called before move happens */
1275b843c749SSergey Zigachev trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
1276b843c749SSergey Zigachev }
1277b843c749SSergey Zigachev
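/*
 * For context: TTM reaches the notify callbacks in this file through the
 * driver's callback table, roughly as wired up in amdgpu_ttm.c (sketch,
 * not the full table):
 *
 *	static struct ttm_bo_driver amdgpu_bo_driver = {
 *		...
 *		.move_notify = &amdgpu_bo_move_notify,
 *		.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
 *		...
 *	};
 */
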
1278b843c749SSergey Zigachev /**
1279b843c749SSergey Zigachev * amdgpu_bo_fault_reserve_notify - notification about a memory fault
1280b843c749SSergey Zigachev * @bo: pointer to a buffer object
1281b843c749SSergey Zigachev *
1282b843c749SSergey Zigachev * Notifies the driver we are taking a fault on this BO and have reserved it,
1283b843c749SSergey Zigachev * also performs bookkeeping.
1284b843c749SSergey Zigachev * TTM driver callback for dealing with vm faults.
1285b843c749SSergey Zigachev *
1286b843c749SSergey Zigachev * Returns:
1287b843c749SSergey Zigachev * 0 for success or a negative error code on failure.
1288b843c749SSergey Zigachev */
1289b843c749SSergey Zigachev int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1290b843c749SSergey Zigachev {
1291b843c749SSergey Zigachev struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1292b843c749SSergey Zigachev struct ttm_operation_ctx ctx = { false, false };
1293b843c749SSergey Zigachev struct amdgpu_bo *abo;
1294b843c749SSergey Zigachev unsigned long offset, size;
1295b843c749SSergey Zigachev int r;
1296b843c749SSergey Zigachev
1297b843c749SSergey Zigachev if (!amdgpu_bo_is_amdgpu_bo(bo))
1298b843c749SSergey Zigachev return 0;
1299b843c749SSergey Zigachev
1300b843c749SSergey Zigachev abo = ttm_to_amdgpu_bo(bo);
1301b843c749SSergey Zigachev
1302b843c749SSergey Zigachev /* Remember that this BO was accessed by the CPU */
1303b843c749SSergey Zigachev abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1304b843c749SSergey Zigachev
1305b843c749SSergey Zigachev if (bo->mem.mem_type != TTM_PL_VRAM)
1306b843c749SSergey Zigachev return 0;
1307b843c749SSergey Zigachev
1308b843c749SSergey Zigachev size = bo->mem.num_pages << PAGE_SHIFT;
1309b843c749SSergey Zigachev offset = bo->mem.start << PAGE_SHIFT;
1310b843c749SSergey Zigachev if ((offset + size) <= adev->gmc.visible_vram_size)
1311b843c749SSergey Zigachev return 0;
1312b843c749SSergey Zigachev
1313b843c749SSergey Zigachev /* Can't move a pinned BO to visible VRAM */
1314b843c749SSergey Zigachev if (abo->pin_count > 0)
1315b843c749SSergey Zigachev return -EINVAL;
1316b843c749SSergey Zigachev
1317b843c749SSergey Zigachev /* hurrah, the memory is not visible! */
1318b843c749SSergey Zigachev atomic64_inc(&adev->num_vram_cpu_page_faults);
1319b843c749SSergey Zigachev amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
1320b843c749SSergey Zigachev AMDGPU_GEM_DOMAIN_GTT);
1321b843c749SSergey Zigachev
1322b843c749SSergey Zigachev /* Avoid costly evictions; only set GTT as a busy placement */
1323b843c749SSergey Zigachev abo->placement.num_busy_placement = 1;
1324b843c749SSergey Zigachev abo->placement.busy_placement = &abo->placements[1];
1325b843c749SSergey Zigachev
1326b843c749SSergey Zigachev r = ttm_bo_validate(bo, &abo->placement, &ctx);
1327b843c749SSergey Zigachev if (unlikely(r != 0))
1328b843c749SSergey Zigachev return r;
1329b843c749SSergey Zigachev
1330b843c749SSergey Zigachev offset = bo->mem.start << PAGE_SHIFT;
1331b843c749SSergey Zigachev /* this should never happen */
1332b843c749SSergey Zigachev if (bo->mem.mem_type == TTM_PL_VRAM &&
1333b843c749SSergey Zigachev (offset + size) > adev->gmc.visible_vram_size)
1334b843c749SSergey Zigachev return -EINVAL;
1335b843c749SSergey Zigachev
1336b843c749SSergey Zigachev return 0;
1337b843c749SSergey Zigachev }
1338b843c749SSergey Zigachev
1339b843c749SSergey Zigachev /**
1340b843c749SSergey Zigachev * amdgpu_bo_fence - add fence to buffer object
1341b843c749SSergey Zigachev *
1342b843c749SSergey Zigachev * @bo: buffer object in question
1343b843c749SSergey Zigachev * @fence: fence to add
1344b843c749SSergey Zigachev * @shared: true if fence should be added shared
1345b843c749SSergey Zigachev *
1346b843c749SSergey Zigachev */
1347b843c749SSergey Zigachev void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
1348b843c749SSergey Zigachev bool shared)
1349b843c749SSergey Zigachev {
1350b843c749SSergey Zigachev struct reservation_object *resv = bo->tbo.resv;
1351b843c749SSergey Zigachev
1352b843c749SSergey Zigachev if (shared)
1353b843c749SSergey Zigachev reservation_object_add_shared_fence(resv, fence);
1354b843c749SSergey Zigachev else
1355b843c749SSergey Zigachev reservation_object_add_excl_fence(resv, fence);
1356b843c749SSergey Zigachev }
1357b843c749SSergey Zigachev
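/*
 * Illustrative sketch: GPU reads typically attach the fence shared so that
 * multiple readers can overlap, while a write installs it as the exclusive
 * fence. The reservation must be held, as for the other resv operations.
 */
#if 0
static void example_fence_job(struct amdgpu_bo *bo, struct dma_fence *fence,
			      bool is_write)
{
	amdgpu_bo_fence(bo, fence, !is_write);
}
#endif
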
1358b843c749SSergey Zigachev /**
1359b843c749SSergey Zigachev * amdgpu_bo_gpu_offset - return GPU offset of bo
1360b843c749SSergey Zigachev * @bo: amdgpu object for which we query the offset
1361b843c749SSergey Zigachev *
1362b843c749SSergey Zigachev * Note: object should either be pinned or reserved when calling this
1363b843c749SSergey Zigachev * function; it might be useful to add a check for this for debugging.
1364b843c749SSergey Zigachev *
1365b843c749SSergey Zigachev * Returns:
1366b843c749SSergey Zigachev * current GPU offset of the object.
1367b843c749SSergey Zigachev */
1368b843c749SSergey Zigachev u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
1369b843c749SSergey Zigachev {
1370b843c749SSergey Zigachev WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
1371b843c749SSergey Zigachev WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
1372b843c749SSergey Zigachev !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
1373b843c749SSergey Zigachev WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
1374b843c749SSergey Zigachev !bo->pin_count);
1375b843c749SSergey Zigachev WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
1376b843c749SSergey Zigachev WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
1377b843c749SSergey Zigachev !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
1378b843c749SSergey Zigachev
1379b843c749SSergey Zigachev return bo->tbo.offset;
1380b843c749SSergey Zigachev }
1381b843c749SSergey Zigachev
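/*
 * Illustrative sketch: pinning satisfies the "pinned or reserved"
 * requirement above and keeps the returned offset stable until unpin.
 * Assumes the two-argument amdgpu_bo_pin(bo, domain) variant of this tree.
 */
#if 0
static int example_pin_and_query(struct amdgpu_bo *bo, u64 *gpu_addr)
{
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);
	amdgpu_bo_unreserve(bo);
	return r;
}
#endif
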
1382b843c749SSergey Zigachev /**
1383b843c749SSergey Zigachev * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
1384b843c749SSergey Zigachev * @adev: amdgpu device object
1385b843c749SSergey Zigachev * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
1386b843c749SSergey Zigachev *
1387b843c749SSergey Zigachev * Returns:
1388b843c749SSergey Zigachev * Which of the allowed domains is preferred for pinning the BO for scanout.
1389b843c749SSergey Zigachev */
1390b843c749SSergey Zigachev uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
1391b843c749SSergey Zigachev uint32_t domain)
1392b843c749SSergey Zigachev {
1393b843c749SSergey Zigachev if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
1394b843c749SSergey Zigachev domain = AMDGPU_GEM_DOMAIN_VRAM;
1395b843c749SSergey Zigachev if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
1396b843c749SSergey Zigachev domain = AMDGPU_GEM_DOMAIN_GTT;
1397b843c749SSergey Zigachev }
1398b843c749SSergey Zigachev return domain;
1399b843c749SSergey Zigachev }
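
/*
 * Illustrative sketch: display code resolves a VRAM|GTT mask before pinning
 * a framebuffer, so boards whose VRAM is at or below AMDGPU_SG_THRESHOLD
 * scan out of GTT instead.
 */
#if 0
static u32 example_scanout_domain(struct amdgpu_device *adev)
{
	return amdgpu_bo_get_preferred_pin_domain(adev,
			AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
}
#endif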