/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_cache.h>

struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
                       struct ttm_resource *mem)
{
        if (mem->bus.offset || mem->bus.addr)
                return 0;

        mem->bus.is_iomem = false;
        if (!bdev->funcs->io_mem_reserve)
                return 0;

        return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
                     struct ttm_resource *mem)
{
        if (!mem)
                return;

        if (!mem->bus.offset && !mem->bus.addr)
                return;

        if (bdev->funcs->io_mem_free)
                bdev->funcs->io_mem_free(bdev, mem);

        mem->bus.offset = 0;
        mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 * @memt: The bus_space tag used for the local page mappings.
 *
 * This function is intended to be able to run asynchronously under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(bool clear,
                     u32 num_pages,
                     struct ttm_kmap_iter *dst_iter,
                     struct ttm_kmap_iter *src_iter,
                     bus_space_tag_t memt)
{
        const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
        const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
        struct iosys_map src_map, dst_map;
        pgoff_t i;

        /* Single TTM move. NOP */
        if (dst_ops->maps_tt && src_ops->maps_tt)
                return;

        /* Don't move nonexistent data. Clear destination instead. */
        if (clear) {
                for (i = 0; i < num_pages; ++i) {
                        dst_ops->map_local(dst_iter, &dst_map, i, memt);
                        if (dst_map.is_iomem)
                                memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
                        else
                                memset(dst_map.vaddr, 0, PAGE_SIZE);
                        if (dst_ops->unmap_local)
                                dst_ops->unmap_local(dst_iter, &dst_map, memt);
                }
                return;
        }

        for (i = 0; i < num_pages; ++i) {
                dst_ops->map_local(dst_iter, &dst_map, i, memt);
                src_ops->map_local(src_iter, &src_map, i, memt);

                drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

                if (src_ops->unmap_local)
                        src_ops->unmap_local(src_iter, &src_map, memt);
                if (dst_ops->unmap_local)
                        dst_ops->unmap_local(dst_iter, &dst_map, memt);
        }
}
EXPORT_SYMBOL(ttm_move_memcpy);
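
/*
 * Usage sketch (illustrative, mirroring the call in ttm_bo_move_memcpy()
 * below): a caller that has already initialized destination and source
 * kmap iterators can copy a resource page by page with, e.g.,
 *
 *	ttm_move_memcpy(false, PFN_UP(dst_mem->size),
 *			dst_iter, src_iter, bdev->memt);
 *
 * where dst_mem, dst_iter, src_iter and bdev are the caller's own
 * objects. Passing clear == true zeroes the destination instead of
 * copying.
 */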

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@dst_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @dst_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *dst_mem)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *dst_man =
                ttm_manager_type(bo->bdev, dst_mem->mem_type);
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *src_mem = bo->resource;
        struct ttm_resource_manager *src_man;
        union {
                struct ttm_kmap_iter_tt tt;
                struct ttm_kmap_iter_linear_io io;
        } _dst_iter, _src_iter;
        struct ttm_kmap_iter *dst_iter, *src_iter;
        bool clear;
        int ret = 0;

        if (WARN_ON(!src_mem))
                return -EINVAL;

        src_man = ttm_manager_type(bdev, src_mem->mem_type);
        if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
                    dst_man->use_tt)) {
                ret = ttm_tt_populate(bdev, ttm, ctx);
                if (ret)
                        return ret;
        }

        dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
        if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
                dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
        if (IS_ERR(dst_iter))
                return PTR_ERR(dst_iter);

        src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
        if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
                src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
        if (IS_ERR(src_iter)) {
                ret = PTR_ERR(src_iter);
                goto out_src_iter;
        }

        clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
        if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
                ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter,
                                bdev->memt);

        if (!src_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
        ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
        if (!dst_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
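
/*
 * Usage sketch (mydrv_bo_move is an illustrative name, not part of this
 * file): a driver's ttm_device_funcs.move() callback can fall back to
 * this helper when no hardware copy engine is available:
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_resource *new_mem,
 *				 struct ttm_place *hop)
 *	{
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */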

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        dma_resv_fini(&fbo->base.base._resv);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        atomic_inc(&ttm_glob.bo_count);
        drm_vma_node_reset(&fbo->base.base.vma_node);

        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.pin_count = 0;
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;

        dma_resv_init(&fbo->base.base._resv);
        fbo->base.base.dev = NULL;
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);

        if (fbo->base.resource) {
                ttm_resource_set_bo(fbo->base.resource, &fbo->base);
                bo->resource = NULL;
                ttm_bo_set_bulk_move(&fbo->base, NULL);
        } else {
                fbo->base.bulk_move = NULL;
        }

        ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
        if (ret) {
                kfree(fbo);
                return ret;
        }

        ttm_bo_get(bo);
        fbo->bo = bo;

        ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

        *new_obj = &fbo->base;
        return 0;
}

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res and the
 * buffer object's ttm.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
                     pgprot_t tmp)
{
        struct ttm_resource_manager *man;
        enum ttm_caching caching;

        man = ttm_manager_type(bo->bdev, res->mem_type);
        if (man->use_tt) {
                caching = bo->ttm->caching;
                if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
                        tmp = pgprot_decrypted(tmp);
        } else {
                caching = res->bus.caching;
        }

        return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
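
/*
 * Usage sketch, mirroring how this file uses the helper itself: vmap a
 * populated ttm with the page protection that matches the resource's
 * caching mode:
 *
 *	prot = ttm_io_prot(bo, bo->resource, PAGE_KERNEL);
 *	vaddr = vmap(bo->ttm->pages, bo->ttm->num_pages, 0, prot);
 */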

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        int flags;
        struct ttm_resource *mem = bo->resource;

        if (bo->resource->bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->bus.caching == ttm_write_combined)
                        flags = BUS_SPACE_MAP_PREFETCHABLE;
#ifdef CONFIG_X86
                else if (mem->bus.caching == ttm_cached)
                        flags = BUS_SPACE_MAP_CACHEABLE;
#endif
                else
                        flags = 0;
                if (bus_space_map(bo->bdev->memt,
                    bo->resource->bus.offset + offset,
                    size, BUS_SPACE_MAP_LINEAR | flags,
                    &bo->resource->bus.bsh)) {
                        printf("%s bus_space_map failed\n", __func__);
                        map->virtual = 0;
                } else {
                        map->virtual = bus_space_vaddr(bo->bdev->memt,
                            bo->resource->bus.bsh);
                }
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = bo->resource;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource_manager *man =
                ttm_manager_type(bo->bdev, bo->resource->mem_type);
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && ttm->caching == ttm_cached &&
            !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > PFN_UP(bo->resource->size))
                return -EINVAL;
        if ((start_page + num_pages) > PFN_UP(bo->resource->size))
                return -EINVAL;

        ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
        if (ret)
                return ret;
        if (!bo->resource->bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);
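
/*
 * Usage sketch: map the first page of a reserved bo, access it through
 * ttm_kmap_obj_virtual(), then unmap (error handling abbreviated):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... read or write through virtual ...
 *	ttm_bo_kunmap(&map);
 */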

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                bus_space_unmap(map->bo->bdev->memt, map->bo->resource->bus.bsh,
                                map->bo->resource->size);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual,
                       map->bo->resource->size);
                break;
        case ttm_bo_map_kmap:
                kunmap_va(map->virtual);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        ttm_mem_io_free(map->bo->bdev, map->bo->resource);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

/**
 * ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap to the
 * data in the buffer object. The parameter @map returns the virtual
 * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
        int flags;
        struct ttm_resource *mem = bo->resource;
        int ret;

        dma_resv_assert_held(bo->base.resv);

        ret = ttm_mem_io_reserve(bo->bdev, mem);
        if (ret)
                return ret;

        if (mem->bus.is_iomem) {
                void __iomem *vaddr_iomem;

                if (mem->bus.addr)
                        vaddr_iomem = (void __iomem *)mem->bus.addr;
                else {
                        if (mem->bus.caching == ttm_write_combined)
                                flags = BUS_SPACE_MAP_PREFETCHABLE;
#ifdef CONFIG_X86
                        else if (mem->bus.caching == ttm_cached)
                                flags = BUS_SPACE_MAP_CACHEABLE;
#endif
                        else
                                flags = 0;
                        if (bus_space_map(bo->bdev->memt, mem->bus.offset,
                            bo->base.size, BUS_SPACE_MAP_LINEAR | flags,
                            &mem->bus.bsh)) {
                                printf("%s bus_space_map failed\n", __func__);
                                return -ENOMEM;
                        }
                        vaddr_iomem = bus_space_vaddr(bo->bdev->memt,
                            mem->bus.bsh);
                }

                if (!vaddr_iomem)
                        return -ENOMEM;

                iosys_map_set_vaddr_iomem(map, vaddr_iomem);

        } else {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
                        .no_wait_gpu = false
                };
                struct ttm_tt *ttm = bo->ttm;
                pgprot_t prot;
                void *vaddr;

                ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
                if (ret)
                        return ret;

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
                vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
                if (!vaddr)
                        return -ENOMEM;

                iosys_map_set_vaddr(map, vaddr);
        }

        return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);
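
/*
 * Usage sketch: with the bo reserved, map the whole object and write
 * through the iosys_map helpers, which hide the I/O vs. system memory
 * distinction (data and size are the caller's own):
 *
 *	struct iosys_map map;
 *
 *	ret = ttm_bo_vmap(bo, &map);
 *	if (ret)
 *		return ret;
 *	iosys_map_memcpy_to(&map, 0, data, size);
 *	ttm_bo_vunmap(bo, &map);
 */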

/**
 * ttm_bo_vunmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_vmap().
 */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
        struct ttm_resource *mem = bo->resource;

        dma_resv_assert_held(bo->base.resv);

        if (iosys_map_is_null(map))
                return;

        if (!map->is_iomem)
                vunmap(map->vaddr,
                       bo->base.size);
        else if (!mem->bus.addr)
                bus_space_unmap(bo->bdev->memt, mem->bus.bsh,
                                bo->base.size);
        iosys_map_clear(map);

        ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
                                 bool dst_use_tt)
{
        long ret;

        ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                    false, 15 * HZ);
        if (ret == 0)
                return -EBUSY;
        if (ret < 0)
                return ret;

        if (!dst_use_tt)
                ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->resource);
        return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
                                struct dma_fence *fence,
                                bool dst_use_tt)
{
        struct ttm_buffer_object *ghost_obj;
        int ret;

        /**
         * This should help pipeline ordinary buffer moves.
         *
         * Hang old buffer memory on a new buffer object,
         * and leave it to be released when the GPU
         * operation has completed.
         */

        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
        if (ret)
                return ret;

        dma_resv_add_fence(&ghost_obj->base._resv, fence,
                           DMA_RESV_USAGE_KERNEL);

        /**
         * If we're not moving to fixed memory, the TTM object
         * needs to stay alive. Otherwise hang it on the ghost
         * bo to be unbound and destroyed.
         */

        if (dst_use_tt)
                ghost_obj->ttm = NULL;
        else
                bo->ttm = NULL;

        dma_resv_unlock(&ghost_obj->base._resv);
        ttm_bo_put(ghost_obj);
        return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
                                       struct dma_fence *fence)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *from;

        from = ttm_manager_type(bdev, bo->resource->mem_type);

        /**
         * BO doesn't have a TTM we need to bind/unbind. Just remember
         * this eviction and free up the allocation.
         */
        spin_lock(&from->move_lock);
        if (!from->move || dma_fence_is_later(fence, from->move)) {
                dma_fence_put(from->move);
                from->move = dma_fence_get(fence);
        }
        spin_unlock(&from->move_lock);

        ttm_resource_free(bo, &bo->resource);
}

/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              bool pipeline,
                              struct ttm_resource *new_mem)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        int ret = 0;

        dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
        if (!evict)
                ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
        else if (!from->use_tt && pipeline)
                ttm_bo_move_pipeline_evict(bo, fence);
        else
                ret = ttm_bo_wait_free_node(bo, man->use_tt);

        if (ret)
                return ret;

        ttm_bo_assign_mem(bo, new_mem);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
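
/*
 * Usage sketch (mydrv_copy_buffer is a hypothetical driver hook): after
 * scheduling a copy on the hardware and obtaining a fence for it, a
 * driver hands both to this helper and drops its fence reference:
 *
 *	fence = mydrv_copy_buffer(bo, new_mem);
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *	dma_fence_put(fence);
 */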

/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
                              struct ttm_resource *new_mem)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        int ret;

        ret = ttm_bo_wait_free_node(bo, man->use_tt);
        if (WARN_ON(ret))
                return;

        ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptible
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        struct ttm_buffer_object *ghost;
        struct ttm_tt *ttm;
        int ret;

        /* If already idle, no need for ghost object dance. */
        if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
                if (!bo->ttm) {
                        /* See comment below about clearing. */
                        ret = ttm_tt_create(bo, true);
                        if (ret)
                                return ret;
                } else {
                        ttm_tt_unpopulate(bo->bdev, bo->ttm);
                        if (bo->type == ttm_bo_type_device)
                                ttm_tt_mark_for_clear(bo->ttm);
                }
                ttm_resource_free(bo, &bo->resource);
                return 0;
        }

        /*
         * We need an unpopulated ttm_tt after giving our current one,
         * if any, to the ghost object. And we can't afford to fail
         * creating one *after* the operation. If the bo subsequently gets
         * resurrected, make sure it's cleared (if ttm_bo_type_device)
         * to avoid leaking sensitive information to user-space.
         */

        ttm = bo->ttm;
        bo->ttm = NULL;
        ret = ttm_tt_create(bo, true);
        swap(bo->ttm, ttm);
        if (ret)
                return ret;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                goto error_destroy_tt;

        ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret) {
                dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                      false, MAX_SCHEDULE_TIMEOUT);
        }

        dma_resv_unlock(&ghost->base._resv);
        ttm_bo_put(ghost);
        bo->ttm = ttm;
        return 0;

error_destroy_tt:
        ttm_tt_destroy(bo->bdev, ttm);
        return ret;
}
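
/*
 * Usage sketch: a driver that wants to drop a bo's backing store
 * entirely (for example to service an explicit "discard" request)
 * can, with the bo reserved, simply call:
 *
 *	ret = ttm_bo_pipeline_gutting(bo);
 *
 * which leaves the bo unpopulated in system placement.
 */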