/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
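
/*
 * Illustrative sketch (comment only, not driver code): all of the index
 * arithmetic in this file reduces to the following.  One CPU page spans
 * PAGE_SIZE/RADEON_GPU_PAGE_SIZE consecutive GART entries, and a byte
 * offset into the aperture converts to table indices as:
 *
 *	t = offset / RADEON_GPU_PAGE_SIZE;		-- first GART entry
 *	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);	-- owning CPU page
 *
 * so binding CPU page p fills entries t through
 * t + (PAGE_SIZE / RADEON_GPU_PAGE_SIZE) - 1.
 */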

/*
 * Common GART table functions.
 */
/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
#ifdef __NetBSD__
	int rsegs;
	int error;

	error = bus_dmamem_alloc(rdev->ddev->dmat, rdev->gart.table_size,
	    PAGE_SIZE, 0, &rdev->gart.rg_table_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (error)
		goto fail0;
	KASSERT(rsegs == 1);
	error = bus_dmamap_create(rdev->ddev->dmat, rdev->gart.table_size, 1,
	    rdev->gart.table_size, 0, BUS_DMA_WAITOK,
	    &rdev->gart.rg_table_map);
	if (error)
		goto fail1;
	error = bus_dmamem_map(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1,
	    rdev->gart.table_size, &rdev->gart.ptr,
	    BUS_DMA_WAITOK|BUS_DMA_NOCACHE);
	if (error)
		goto fail2;
	error = bus_dmamap_load(rdev->ddev->dmat, rdev->gart.rg_table_map,
	    rdev->gart.ptr, rdev->gart.table_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto fail3;

	/* Success!  */
	rdev->gart.table_addr = rdev->gart.rg_table_map->dm_segs[0].ds_addr;
	return 0;

fail4: __unused
	bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail3:	bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
	    rdev->gart.table_size);
fail2:	bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail1:	bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
fail0:	KASSERT(error);
	/* XXX errno NetBSD->Linux */
	return -error;
#else
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
#endif
}
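
/*
 * Usage sketch (hypothetical caller, for illustration only; assumes the
 * asic-specific code has set rdev->gart.table_size beforehand):
 *
 *	if ((r = radeon_gart_table_ram_alloc(rdev)) != 0)
 *		return r;
 *	... program the GART base with rdev->gart.table_addr ...
 *	radeon_gart_table_ram_free(rdev);	-- on teardown
 */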

/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
	if (rdev->gart.ptr == NULL) {
		return;
	}
#ifdef __NetBSD__
	bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
	bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
	    rdev->gart.table_size);
	bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
	bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
#else
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
			    (void *)rdev->gart.ptr,
			    rdev->gart.table_addr);
	rdev->gart.ptr = NULL;
	rdev->gart.table_addr = 0;
#endif
}

/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;
	return r;
}
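
/*
 * Usage sketch (illustrative only): the vram table follows an
 * alloc/pin ... unpin/free bracket, and rdev->gart.ptr is a valid CPU
 * mapping of the table only while it is pinned:
 *
 *	if ((r = radeon_gart_table_vram_alloc(rdev)) != 0)
 *		return r;
 *	if ((r = radeon_gart_table_vram_pin(rdev)) != 0)
 *		return r;
 *	... table usable via rdev->gart.table_addr / rdev->gart.ptr ...
 *	radeon_gart_table_vram_unpin(rdev);	-- e.g. on suspend
 *	radeon_gart_table_vram_free(rdev);	-- on final teardown
 */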

/**
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}

/**
 * radeon_gart_table_vram_free - free gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		return;
	}
	radeon_bo_unref(&rdev->gart.robj);
}

#ifdef __NetBSD__
static void
radeon_gart_pre_update(struct radeon_device *rdev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	if (rdev->gart.rg_table_map != NULL) {
		const unsigned entsize =
		    rdev->gart.table_size / rdev->gart.num_gpu_pages;

		bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_PREWRITE);
	}
}

static void
radeon_gart_post_update(struct radeon_device *rdev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	membar_sync();		/* XXX overkill */
	if (rdev->gart.rg_table_map != NULL) {
		const unsigned entsize =
		    rdev->gart.table_size / rdev->gart.num_gpu_pages;

		bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_POSTWRITE);
	}
	radeon_gart_tlb_flush(rdev);
}
#endif
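
/*
 * Every batch of GART-entry updates in the NetBSD code below is
 * bracketed by the two helpers above: radeon_gart_pre_update makes the
 * affected table range safe for CPU stores (BUS_DMASYNC_PREWRITE), the
 * entries are written with radeon_gart_set_page, and
 * radeon_gart_post_update issues a memory barrier, pushes the stores to
 * the device (BUS_DMASYNC_POSTWRITE), and flushes the GPU TLB.  Sketch
 * of the pattern (addr[] is hypothetical):
 *
 *	radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
 *	for (i = 0; i < gpu_npages; i++)
 *		radeon_gart_set_page(rdev, gpu_pgstart + i, addr[i]);
 *	radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);
 */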

/*
 * Common gart functions.
 */
#ifdef __NetBSD__
void
radeon_gart_unbind(struct radeon_device *rdev, unsigned gpu_start,
    unsigned npages)
{
	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
	const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
	unsigned pgno, gpu_pgno;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages <= rdev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= rdev->gart.num_gpu_pages);

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}

	radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		if (rdev->gart.pages[pgstart + pgno] == NULL)
			continue;
		rdev->gart.pages[pgstart + pgno] = NULL;
		rdev->gart.pages_addr[pgstart + pgno] = rdev->dummy_page.addr;
		if (rdev->gart.ptr == NULL)
			continue;
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++)
			radeon_gart_set_page(rdev,
			    (gpu_pgstart + gpu_per_cpu*pgno + gpu_pgno),
			    (rdev->dummy_page.addr +
				gpu_pgno*RADEON_GPU_PAGE_SIZE));
	}
	radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);
}
#else
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t, page_base);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}
#endif

#ifdef __NetBSD__
int
radeon_gart_bind(struct radeon_device *rdev, unsigned gpu_start,
    unsigned npages, struct page **pages, bus_dmamap_t dmamap)
{
	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
	const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
	unsigned pgno, gpu_pgno;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages == dmamap->dm_nsegs);
	KASSERT(npages <= rdev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= rdev->gart.num_gpu_pages);

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		const bus_addr_t addr = dmamap->dm_segs[pgno].ds_addr;

		KASSERT(dmamap->dm_segs[pgno].ds_len == PAGE_SIZE);
		rdev->gart.pages[pgstart + pgno] = pages[pgno];
		rdev->gart.pages_addr[pgstart + pgno] = addr;
		if (rdev->gart.ptr == NULL)
			continue;
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++)
			radeon_gart_set_page(rdev,
			    (gpu_pgstart + gpu_per_cpu*pgno + gpu_pgno),
			    (addr + gpu_pgno*RADEON_GPU_PAGE_SIZE));
	}
	radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);

	return 0;
}
#else
/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
	unsigned t;
	unsigned p;
	uint64_t page_base;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages_addr[p] = dma_addr[i];
		rdev->gart.pages[p] = pagelist[i];
		if (rdev->gart.ptr) {
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
	return 0;
}
#endif
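
/*
 * Usage sketch (hypothetical caller of the NetBSD variant; the dmamap
 * is assumed to be loaded with one PAGE_SIZE segment per CPU page, as
 * the KASSERTs in radeon_gart_bind require):
 *
 *	if ((r = radeon_gart_bind(rdev, offset, npages, pages, dmamap)) != 0)
 *		return r;
 *	... the GPU may now reach the pages through the aperture ...
 *	radeon_gart_unbind(rdev, offset, npages);
 */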

/**
 * radeon_gart_restore - bind all pages in the gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Binds all pages in the gart page table (all asics).
 * Used to rebuild the gart table on device startup or resume.
 */
void radeon_gart_restore(struct radeon_device *rdev)
{
#ifdef __NetBSD__
	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	unsigned pgno, gpu_pgno;

	if (rdev->gart.ptr == NULL)
		return;

	radeon_gart_pre_update(rdev, 0, rdev->gart.num_gpu_pages);
	for (pgno = 0; pgno < rdev->gart.num_cpu_pages; pgno++) {
		const bus_addr_t addr = rdev->gart.pages_addr[pgno];
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++)
			radeon_gart_set_page(rdev,
			    (gpu_per_cpu*pgno + gpu_pgno),
			    (addr + gpu_pgno*RADEON_GPU_PAGE_SIZE));
	}
	radeon_gart_post_update(rdev, 0, rdev->gart.num_gpu_pages);
#else
	int i, j, t;
	u64 page_base;

	if (!rdev->gart.ptr) {
		return;
	}
	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
		page_base = rdev->gart.pages_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			radeon_gart_set_page(rdev, t, page_base);
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
#endif
}
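
/*
 * Sketch of where restore fits (illustrative enable/resume sequence;
 * the exact ordering lives in the asic-specific gart_enable paths):
 *
 *	radeon_gart_table_vram_pin(rdev);	-- table mapped again
 *	radeon_gart_restore(rdev);		-- rewrite entries from pages_addr[]
 *	... enable the aperture in hardware, set rdev->gart.ready ...
 */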

/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
					rdev->gart.num_cpu_pages);
	if (rdev->gart.pages_addr == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
	}
	return 0;
}
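
/*
 * Usage sketch (illustrative init ordering for a chip-specific GART
 * backend; not a function in this file):
 *
 *	rdev->gart.table_size = ...;			-- set by asic code
 *	if ((r = radeon_gart_init(rdev)) != 0)		-- dummy page + arrays
 *		return r;
 *	r = radeon_gart_table_vram_alloc(rdev);		-- or _ram_alloc
 *
 * radeon_gart_fini below undoes radeon_gart_init and is paired with the
 * matching table_*_free call.
 */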

/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void radeon_gart_fini(struct radeon_device *rdev)
{
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
		/* unbind pages */
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	}
	rdev->gart.ready = false;
	vfree(rdev->gart.pages);
	vfree(rdev->gart.pages_addr);
	rdev->gart.pages = NULL;
	rdev->gart.pages_addr = NULL;

	radeon_dummy_page_fini(rdev);
}