1b843c749SSergey Zigachev /*
2b843c749SSergey Zigachev * Copyright 2016 Advanced Micro Devices, Inc.
3b843c749SSergey Zigachev *
4b843c749SSergey Zigachev * Permission is hereby granted, free of charge, to any person obtaining a
5b843c749SSergey Zigachev * copy of this software and associated documentation files (the "Software"),
6b843c749SSergey Zigachev * to deal in the Software without restriction, including without limitation
7b843c749SSergey Zigachev * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8b843c749SSergey Zigachev * and/or sell copies of the Software, and to permit persons to whom the
9b843c749SSergey Zigachev * Software is furnished to do so, subject to the following conditions:
10b843c749SSergey Zigachev *
11b843c749SSergey Zigachev * The above copyright notice and this permission notice shall be included in
12b843c749SSergey Zigachev * all copies or substantial portions of the Software.
13b843c749SSergey Zigachev *
14b843c749SSergey Zigachev * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15b843c749SSergey Zigachev * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16b843c749SSergey Zigachev * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17b843c749SSergey Zigachev * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18b843c749SSergey Zigachev * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19b843c749SSergey Zigachev * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20b843c749SSergey Zigachev * OTHER DEALINGS IN THE SOFTWARE.
21b843c749SSergey Zigachev *
22b843c749SSergey Zigachev * Authors: Christian König
23b843c749SSergey Zigachev */
24b843c749SSergey Zigachev
25b843c749SSergey Zigachev #include <drm/drmP.h>
26b843c749SSergey Zigachev #include "amdgpu.h"
27b843c749SSergey Zigachev
28b843c749SSergey Zigachev struct amdgpu_gtt_mgr {
29b843c749SSergey Zigachev struct drm_mm mm;
30*78973132SSergey Zigachev struct spinlock lock;
31b843c749SSergey Zigachev atomic64_t available;
32b843c749SSergey Zigachev };
33b843c749SSergey Zigachev
34b843c749SSergey Zigachev struct amdgpu_gtt_node {
35b843c749SSergey Zigachev struct drm_mm_node node;
36b843c749SSergey Zigachev struct ttm_buffer_object *tbo;
37b843c749SSergey Zigachev };
38b843c749SSergey Zigachev
39b843c749SSergey Zigachev /**
40b843c749SSergey Zigachev * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
41b843c749SSergey Zigachev *
42b843c749SSergey Zigachev * @man: TTM memory type manager
43b843c749SSergey Zigachev * @p_size: maximum size of GTT
44b843c749SSergey Zigachev *
45b843c749SSergey Zigachev * Allocate and initialize the GTT manager.
46b843c749SSergey Zigachev */
amdgpu_gtt_mgr_init(struct ttm_mem_type_manager * man,unsigned long p_size)47b843c749SSergey Zigachev static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
48b843c749SSergey Zigachev unsigned long p_size)
49b843c749SSergey Zigachev {
50b843c749SSergey Zigachev struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
51b843c749SSergey Zigachev struct amdgpu_gtt_mgr *mgr;
52b843c749SSergey Zigachev uint64_t start, size;
53b843c749SSergey Zigachev
54b843c749SSergey Zigachev mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
55b843c749SSergey Zigachev if (!mgr)
56b843c749SSergey Zigachev return -ENOMEM;
57b843c749SSergey Zigachev
58b843c749SSergey Zigachev start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
59b843c749SSergey Zigachev size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
60b843c749SSergey Zigachev drm_mm_init(&mgr->mm, start, size);
61*78973132SSergey Zigachev spin_init(&mgr->lock, "aggmml");
62b843c749SSergey Zigachev atomic64_set(&mgr->available, p_size);
63b843c749SSergey Zigachev man->priv = mgr;
64b843c749SSergey Zigachev return 0;
65b843c749SSergey Zigachev }
66b843c749SSergey Zigachev
67b843c749SSergey Zigachev /**
68b843c749SSergey Zigachev * amdgpu_gtt_mgr_fini - free and destroy GTT manager
69b843c749SSergey Zigachev *
70b843c749SSergey Zigachev * @man: TTM memory type manager
71b843c749SSergey Zigachev *
72b843c749SSergey Zigachev * Destroy and free the GTT manager, returns -EBUSY if ranges are still
73b843c749SSergey Zigachev * allocated inside it.
74b843c749SSergey Zigachev */
amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager * man)75b843c749SSergey Zigachev static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
76b843c749SSergey Zigachev {
77b843c749SSergey Zigachev struct amdgpu_gtt_mgr *mgr = man->priv;
78b843c749SSergey Zigachev spin_lock(&mgr->lock);
79b843c749SSergey Zigachev drm_mm_takedown(&mgr->mm);
80b843c749SSergey Zigachev spin_unlock(&mgr->lock);
81b843c749SSergey Zigachev kfree(mgr);
82b843c749SSergey Zigachev man->priv = NULL;
83b843c749SSergey Zigachev return 0;
84b843c749SSergey Zigachev }
85b843c749SSergey Zigachev
86b843c749SSergey Zigachev /**
87b843c749SSergey Zigachev * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
88b843c749SSergey Zigachev *
89b843c749SSergey Zigachev * @mem: the mem object to check
90b843c749SSergey Zigachev *
91b843c749SSergey Zigachev * Check if a mem object has already address space allocated.
92b843c749SSergey Zigachev */
amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg * mem)93b843c749SSergey Zigachev bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
94b843c749SSergey Zigachev {
95b843c749SSergey Zigachev struct amdgpu_gtt_node *node = mem->mm_node;
96b843c749SSergey Zigachev
97b843c749SSergey Zigachev return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
98b843c749SSergey Zigachev }
99b843c749SSergey Zigachev
100b843c749SSergey Zigachev /**
101b843c749SSergey Zigachev * amdgpu_gtt_mgr_alloc - allocate new ranges
102b843c749SSergey Zigachev *
103b843c749SSergey Zigachev * @man: TTM memory type manager
104b843c749SSergey Zigachev * @tbo: TTM BO we need this range for
105b843c749SSergey Zigachev * @place: placement flags and restrictions
106b843c749SSergey Zigachev * @mem: the resulting mem object
107b843c749SSergey Zigachev *
108b843c749SSergey Zigachev * Allocate the address space for a node.
109b843c749SSergey Zigachev */
amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager * man,struct ttm_buffer_object * tbo,const struct ttm_place * place,struct ttm_mem_reg * mem)110b843c749SSergey Zigachev static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
111b843c749SSergey Zigachev struct ttm_buffer_object *tbo,
112b843c749SSergey Zigachev const struct ttm_place *place,
113b843c749SSergey Zigachev struct ttm_mem_reg *mem)
114b843c749SSergey Zigachev {
115b843c749SSergey Zigachev struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
116b843c749SSergey Zigachev struct amdgpu_gtt_mgr *mgr = man->priv;
117b843c749SSergey Zigachev struct amdgpu_gtt_node *node = mem->mm_node;
118b843c749SSergey Zigachev enum drm_mm_insert_mode mode;
119b843c749SSergey Zigachev unsigned long fpfn, lpfn;
120b843c749SSergey Zigachev int r;
121b843c749SSergey Zigachev
122b843c749SSergey Zigachev if (amdgpu_gtt_mgr_has_gart_addr(mem))
123b843c749SSergey Zigachev return 0;
124b843c749SSergey Zigachev
125b843c749SSergey Zigachev if (place)
126b843c749SSergey Zigachev fpfn = place->fpfn;
127b843c749SSergey Zigachev else
128b843c749SSergey Zigachev fpfn = 0;
129b843c749SSergey Zigachev
130b843c749SSergey Zigachev if (place && place->lpfn)
131b843c749SSergey Zigachev lpfn = place->lpfn;
132b843c749SSergey Zigachev else
133b843c749SSergey Zigachev lpfn = adev->gart.num_cpu_pages;
134b843c749SSergey Zigachev
135b843c749SSergey Zigachev mode = DRM_MM_INSERT_BEST;
136b843c749SSergey Zigachev if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
137b843c749SSergey Zigachev mode = DRM_MM_INSERT_HIGH;
138b843c749SSergey Zigachev
139b843c749SSergey Zigachev spin_lock(&mgr->lock);
140b843c749SSergey Zigachev r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
141b843c749SSergey Zigachev mem->page_alignment, 0, fpfn, lpfn,
142b843c749SSergey Zigachev mode);
143b843c749SSergey Zigachev spin_unlock(&mgr->lock);
144b843c749SSergey Zigachev
145b843c749SSergey Zigachev if (!r)
146b843c749SSergey Zigachev mem->start = node->node.start;
147b843c749SSergey Zigachev
148b843c749SSergey Zigachev return r;
149b843c749SSergey Zigachev }
150b843c749SSergey Zigachev
151b843c749SSergey Zigachev /**
152b843c749SSergey Zigachev * amdgpu_gtt_mgr_new - allocate a new node
153b843c749SSergey Zigachev *
154b843c749SSergey Zigachev * @man: TTM memory type manager
155b843c749SSergey Zigachev * @tbo: TTM BO we need this range for
156b843c749SSergey Zigachev * @place: placement flags and restrictions
157b843c749SSergey Zigachev * @mem: the resulting mem object
158b843c749SSergey Zigachev *
159b843c749SSergey Zigachev * Dummy, allocate the node but no space for it yet.
160b843c749SSergey Zigachev */
amdgpu_gtt_mgr_new(struct ttm_mem_type_manager * man,struct ttm_buffer_object * tbo,const struct ttm_place * place,struct ttm_mem_reg * mem)161b843c749SSergey Zigachev static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
162b843c749SSergey Zigachev struct ttm_buffer_object *tbo,
163b843c749SSergey Zigachev const struct ttm_place *place,
164b843c749SSergey Zigachev struct ttm_mem_reg *mem)
165b843c749SSergey Zigachev {
166b843c749SSergey Zigachev struct amdgpu_gtt_mgr *mgr = man->priv;
167b843c749SSergey Zigachev struct amdgpu_gtt_node *node;
168b843c749SSergey Zigachev int r;
169b843c749SSergey Zigachev
170b843c749SSergey Zigachev spin_lock(&mgr->lock);
171b843c749SSergey Zigachev if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
172b843c749SSergey Zigachev atomic64_read(&mgr->available) < mem->num_pages) {
173b843c749SSergey Zigachev spin_unlock(&mgr->lock);
174b843c749SSergey Zigachev return 0;
175b843c749SSergey Zigachev }
176b843c749SSergey Zigachev atomic64_sub(mem->num_pages, &mgr->available);
177b843c749SSergey Zigachev spin_unlock(&mgr->lock);
178b843c749SSergey Zigachev
179b843c749SSergey Zigachev node = kzalloc(sizeof(*node), GFP_KERNEL);
180b843c749SSergey Zigachev if (!node) {
181b843c749SSergey Zigachev r = -ENOMEM;
182b843c749SSergey Zigachev goto err_out;
183b843c749SSergey Zigachev }
184b843c749SSergey Zigachev
185b843c749SSergey Zigachev node->node.start = AMDGPU_BO_INVALID_OFFSET;
186b843c749SSergey Zigachev node->node.size = mem->num_pages;
187b843c749SSergey Zigachev node->tbo = tbo;
188b843c749SSergey Zigachev mem->mm_node = node;
189b843c749SSergey Zigachev
190b843c749SSergey Zigachev if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
191b843c749SSergey Zigachev r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
192b843c749SSergey Zigachev if (unlikely(r)) {
193b843c749SSergey Zigachev kfree(node);
194b843c749SSergey Zigachev mem->mm_node = NULL;
195b843c749SSergey Zigachev r = 0;
196b843c749SSergey Zigachev goto err_out;
197b843c749SSergey Zigachev }
198b843c749SSergey Zigachev } else {
199b843c749SSergey Zigachev mem->start = node->node.start;
200b843c749SSergey Zigachev }
201b843c749SSergey Zigachev
202b843c749SSergey Zigachev return 0;
203b843c749SSergey Zigachev err_out:
204b843c749SSergey Zigachev atomic64_add(mem->num_pages, &mgr->available);
205b843c749SSergey Zigachev
206b843c749SSergey Zigachev return r;
207b843c749SSergey Zigachev }
208b843c749SSergey Zigachev
209b843c749SSergey Zigachev /**
210b843c749SSergey Zigachev * amdgpu_gtt_mgr_del - free ranges
211b843c749SSergey Zigachev *
212b843c749SSergey Zigachev * @man: TTM memory type manager
213b843c749SSergey Zigachev * @tbo: TTM BO we need this range for
214b843c749SSergey Zigachev * @place: placement flags and restrictions
215b843c749SSergey Zigachev * @mem: TTM memory object
216b843c749SSergey Zigachev *
217b843c749SSergey Zigachev * Free the allocated GTT again.
218b843c749SSergey Zigachev */
amdgpu_gtt_mgr_del(struct ttm_mem_type_manager * man,struct ttm_mem_reg * mem)219b843c749SSergey Zigachev static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
220b843c749SSergey Zigachev struct ttm_mem_reg *mem)
221b843c749SSergey Zigachev {
222b843c749SSergey Zigachev struct amdgpu_gtt_mgr *mgr = man->priv;
223b843c749SSergey Zigachev struct amdgpu_gtt_node *node = mem->mm_node;
224b843c749SSergey Zigachev
225b843c749SSergey Zigachev if (!node)
226b843c749SSergey Zigachev return;
227b843c749SSergey Zigachev
228b843c749SSergey Zigachev spin_lock(&mgr->lock);
229b843c749SSergey Zigachev if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
230b843c749SSergey Zigachev drm_mm_remove_node(&node->node);
231b843c749SSergey Zigachev spin_unlock(&mgr->lock);
232b843c749SSergey Zigachev atomic64_add(mem->num_pages, &mgr->available);
233b843c749SSergey Zigachev
234b843c749SSergey Zigachev kfree(node);
235b843c749SSergey Zigachev mem->mm_node = NULL;
236b843c749SSergey Zigachev }
237b843c749SSergey Zigachev
238b843c749SSergey Zigachev /**
239b843c749SSergey Zigachev * amdgpu_gtt_mgr_usage - return usage of GTT domain
240b843c749SSergey Zigachev *
241b843c749SSergey Zigachev * @man: TTM memory type manager
242b843c749SSergey Zigachev *
243b843c749SSergey Zigachev * Return how many bytes are used in the GTT domain
244b843c749SSergey Zigachev */
amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager * man)245b843c749SSergey Zigachev uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
246b843c749SSergey Zigachev {
247b843c749SSergey Zigachev struct amdgpu_gtt_mgr *mgr = man->priv;
248b843c749SSergey Zigachev s64 result = man->size - atomic64_read(&mgr->available);
249b843c749SSergey Zigachev
250b843c749SSergey Zigachev return (result > 0 ? result : 0) * PAGE_SIZE;
251b843c749SSergey Zigachev }
252b843c749SSergey Zigachev
/**
 * amdgpu_gtt_mgr_recover - re-bind all allocated GTT nodes
 *
 * @man: TTM memory type manager
 *
 * Walk every node currently allocated in the DRM MM and re-create its GART
 * binding via amdgpu_ttm_recover_gart(); presumably used after the GART
 * table is lost (e.g. GPU reset) — confirm at call sites. Stops at and
 * returns the first error, 0 on success.
 */
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node;
	struct drm_mm_node *mm_node;
	int r = 0;

	spin_lock(&mgr->lock);
	drm_mm_for_each_node(mm_node, &mgr->mm) {
		node = container_of(mm_node, struct amdgpu_gtt_node, node);
		r = amdgpu_ttm_recover_gart(node->tbo);
		if (r)
			break;
	}
	spin_unlock(&mgr->lock);

	return r;
}
271b843c749SSergey Zigachev
272b843c749SSergey Zigachev /**
273b843c749SSergey Zigachev * amdgpu_gtt_mgr_debug - dump VRAM table
274b843c749SSergey Zigachev *
275b843c749SSergey Zigachev * @man: TTM memory type manager
276b843c749SSergey Zigachev * @printer: DRM printer to use
277b843c749SSergey Zigachev *
278b843c749SSergey Zigachev * Dump the table content using printk.
279b843c749SSergey Zigachev */
amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager * man,struct drm_printer * printer)280b843c749SSergey Zigachev static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
281b843c749SSergey Zigachev struct drm_printer *printer)
282b843c749SSergey Zigachev {
283b843c749SSergey Zigachev struct amdgpu_gtt_mgr *mgr = man->priv;
284b843c749SSergey Zigachev
285b843c749SSergey Zigachev spin_lock(&mgr->lock);
286b843c749SSergey Zigachev drm_mm_print(&mgr->mm, printer);
287b843c749SSergey Zigachev spin_unlock(&mgr->lock);
288b843c749SSergey Zigachev
289*78973132SSergey Zigachev drm_printf(printer, "man size:%lu pages, gtt available:%lld pages, usage:%luMB\n",
290b843c749SSergey Zigachev man->size, (u64)atomic64_read(&mgr->available),
291b843c749SSergey Zigachev amdgpu_gtt_mgr_usage(man) >> 20);
292b843c749SSergey Zigachev }
293b843c749SSergey Zigachev
294b843c749SSergey Zigachev const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
295b843c749SSergey Zigachev .init = amdgpu_gtt_mgr_init,
296b843c749SSergey Zigachev .takedown = amdgpu_gtt_mgr_fini,
297b843c749SSergey Zigachev .get_node = amdgpu_gtt_mgr_new,
298b843c749SSergey Zigachev .put_node = amdgpu_gtt_mgr_del,
299b843c749SSergey Zigachev .debug = amdgpu_gtt_mgr_debug
300b843c749SSergey Zigachev };
301