// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "intel_migrate.h"
#include "intel_ring.h"
#include "gem/i915_gem_lmem.h"

struct insert_pte_data {
	u64 offset;
};

#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */

#define GET_CCS_BYTES(i915, size)	(HAS_FLAT_CCS(i915) ? \
					 DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0)
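
/*
 * With NUM_BYTES_PER_CCS_BYTE being 256, the CCS overhead is 1/256th of
 * the main surface: e.g. GET_CCS_BYTES() for a 64M object yields 256K of
 * CCS, matching the 1/256 reservation described in the Flat-CCS DOC
 * comment further below.
 */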
static bool engine_supports_migration(struct intel_engine_cs *engine)
{
	if (!engine)
		return false;

	/*
	 * We need the ability to prevent arbitration (MI_ARB_ON_OFF),
	 * the ability to write PTEs using inline data (MI_STORE_DATA_IMM)
	 * and of course the ability to do the block transfer (blits).
	 */
	GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS);

	return true;
}

static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
				struct i915_page_table *pt,
				void *data)
{
	struct insert_pte_data *d = data;

	/*
	 * Insert a dummy PTE into every PT that will map to LMEM to ensure
	 * we have a correctly set up PDE structure for later use.
	 */
	vm->insert_page(vm, 0, d->offset,
			i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
			PTE_LM);
	GEM_BUG_ON(!pt->is_compact);
	d->offset += SZ_2M;
}

static void xehpsdv_insert_pte(struct i915_address_space *vm,
			       struct i915_page_table *pt,
			       void *data)
{
	struct insert_pte_data *d = data;

	/*
	 * We are playing tricks here, since the actual pt, from the hw
	 * pov, is only 256 bytes with 32 entries, or 4096 bytes with 512
	 * entries, but we are still guaranteed that the physical
	 * alignment is 64K underneath for the pt, and we are careful
	 * not to access the space in the void.
	 */
	vm->insert_page(vm, px_dma(pt), d->offset,
			i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
			PTE_LM);
	d->offset += SZ_64K;
}

static void insert_pte(struct i915_address_space *vm,
		       struct i915_page_table *pt,
		       void *data)
{
	struct insert_pte_data *d = data;

	vm->insert_page(vm, px_dma(pt), d->offset,
			i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
			i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
	d->offset += PAGE_SIZE;
}

static struct i915_address_space *migrate_vm(struct intel_gt *gt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *vm;
	int err;
	int i;

	/*
	 * We construct a very special VM for use by all migration contexts,
	 * it is kept pinned so that it can be used at any time. As we need
	 * to pre-allocate the page directories for the migration VM, this
	 * limits us to only using a small number of prepared vma.
	 *
	 * To be able to pipeline and reschedule migration operations while
	 * avoiding unnecessary contention on the vm itself, the PTE updates
	 * are inline with the blits. All the blits use the same fixed
	 * addresses, with the backing store redirection being updated on the
	 * fly. Only 2 implicit vma are used for all migration operations.
	 *
	 * We lay the ppGTT out as:
	 *
	 *	[0, CHUNK_SZ) -> first object
	 *	[CHUNK_SZ, 2 * CHUNK_SZ) -> second object
	 *	[2 * CHUNK_SZ, 2 * CHUNK_SZ + 2 * CHUNK_SZ >> 9] -> PTE
	 *
	 * By exposing the dma addresses of the page directories themselves
	 * within the ppGTT, we are then able to rewrite the PTE prior to use.
	 * But the PTE update and subsequent migration operation must be atomic,
	 * i.e. within the same non-preemptible window so that we do not switch
	 * to another migration context that overwrites the PTE.
	 *
	 * This changes quite a bit on platforms with HAS_64K_PAGES support,
	 * where we instead have three windows, each CHUNK_SZ in size. The
	 * first is reserved for mapping system-memory, and that just uses the
	 * 512 entry layout using 4K GTT pages. The other two windows just map
	 * lmem pages and must use the new compact 32 entry layout using 64K GTT
	 * pages, which ensures we can address any lmem object that the user
	 * throws at us. We then also use xehpsdv_toggle_pdes as a way of
	 * just toggling the PDE bit (GEN12_PDE_64K) for us, to enable the
	 * compact layout for each of these page-tables that fall within the
	 * [CHUNK_SZ, 3 * CHUNK_SZ) range.
	 *
	 * We lay the ppGTT out as:
	 *
	 * [0, CHUNK_SZ) -> first window/object, maps smem
	 * [CHUNK_SZ, 2 * CHUNK_SZ) -> second window/object, maps lmem src
	 * [2 * CHUNK_SZ, 3 * CHUNK_SZ) -> third window/object, maps lmem dst
	 *
	 * For the PTE window it's also quite different, since each PTE must
	 * point to some 64K page, one for each PT (since it's in lmem), and yet
	 * each is only <= 4096 bytes, but since the unused space within that PTE
	 * range is never touched, this should be fine.
	 *
	 * So basically each PT now needs 64K of virtual memory, instead of 4K,
	 * which looks like:
	 *
	 * [3 * CHUNK_SZ, 3 * CHUNK_SZ + ((3 * CHUNK_SZ / SZ_2M) * SZ_64K)] -> PTE
	 */
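
	/*
	 * Concretely, with CHUNK_SZ = 8M: the legacy layout needs
	 * (16M >> 12) * sizeof(u64) = 32K of PTE window after its two 8M
	 * windows, while the HAS_64K_PAGES layout needs
	 * (24M / 2M) * 64K = 768K after its three 8M windows, matching
	 * the sz arithmetic in the loop below.
	 */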

	vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY);
	if (IS_ERR(vm))
		return ERR_CAST(vm);

	if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
		err = -ENODEV;
		goto err_vm;
	}

	if (HAS_64K_PAGES(gt->i915))
		stash.pt_sz = I915_GTT_PAGE_SIZE_64K;

	/*
	 * Each engine instance is assigned its own chunk in the VM, so
	 * that we can run multiple instances concurrently
	 */
	for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
		struct intel_engine_cs *engine;
		u64 base = (u64)i << 32;
		struct insert_pte_data d = {};
		struct i915_gem_ww_ctx ww;
		u64 sz;

		engine = gt->engine_class[COPY_ENGINE_CLASS][i];
		if (!engine_supports_migration(engine))
			continue;

		/*
		 * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need
		 * 4x2 page directories for source/destination.
		 */
		if (HAS_64K_PAGES(gt->i915))
			sz = 3 * CHUNK_SZ;
		else
			sz = 2 * CHUNK_SZ;
		d.offset = base + sz;

		/*
		 * We need another page directory setup so that we can write
		 * the 8x512 PTE in each chunk.
		 */
		if (HAS_64K_PAGES(gt->i915))
			sz += (sz / SZ_2M) * SZ_64K;
		else
			sz += (sz >> 12) * sizeof(u64);

		err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
		if (err)
			goto err_vm;

		for_i915_gem_ww(&ww, err, true) {
			err = i915_vm_lock_objects(&vm->vm, &ww);
			if (err)
				continue;
			err = i915_vm_map_pt_stash(&vm->vm, &stash);
			if (err)
				continue;

			vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
		}
		i915_vm_free_pt_stash(&vm->vm, &stash);
		if (err)
			goto err_vm;

		/* Now allow the GPU to rewrite the PTE via its own ppGTT */
		if (HAS_64K_PAGES(gt->i915)) {
			vm->vm.foreach(&vm->vm, base, d.offset - base,
				       xehpsdv_insert_pte, &d);
			d.offset = base + CHUNK_SZ;
			vm->vm.foreach(&vm->vm,
				       d.offset,
				       2 * CHUNK_SZ,
				       xehpsdv_toggle_pdes, &d);
		} else {
			vm->vm.foreach(&vm->vm, base, d.offset - base,
				       insert_pte, &d);
		}
	}

	return &vm->vm;

err_vm:
	i915_vm_put(&vm->vm);
	return ERR_PTR(err);
}

static struct intel_engine_cs *first_copy_engine(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	int i;

	for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
		engine = gt->engine_class[COPY_ENGINE_CLASS][i];
		if (engine_supports_migration(engine))
			return engine;
	}

	return NULL;
}

static struct intel_context *pinned_context(struct intel_gt *gt)
{
	static struct lock_class_key key;
	struct intel_engine_cs *engine;
	struct i915_address_space *vm;
	struct intel_context *ce;

	engine = first_copy_engine(gt);
	if (!engine)
		return ERR_PTR(-ENODEV);

	vm = migrate_vm(gt);
	if (IS_ERR(vm))
		return ERR_CAST(vm);

	ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
						I915_GEM_HWS_MIGRATE,
						&key, "migrate");
	i915_vm_put(vm);
	return ce;
}

int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt)
{
	struct intel_context *ce;

	memset(m, 0, sizeof(*m));

	ce = pinned_context(gt);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	m->context = ce;
	return 0;
}

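/*
 * Pick a uniformly distributed index in [0, max) without a divide:
 * mul_u32_u32() forms the 64-bit product random * max, and
 * upper_32_bits() of that product is equivalent to
 * (random * max) / 2^32, the classic multiply-high range reduction.
 */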
static int random_index(unsigned int max)
{
	return upper_32_bits(mul_u32_u32(get_random_u32(), max));
}

static struct intel_context *__migrate_engines(struct intel_gt *gt)
{
	struct intel_engine_cs *engines[MAX_ENGINE_INSTANCE];
	struct intel_engine_cs *engine;
	unsigned int count, i;

	count = 0;
	for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
		engine = gt->engine_class[COPY_ENGINE_CLASS][i];
		if (engine_supports_migration(engine))
			engines[count++] = engine;
	}

	return intel_context_create(engines[random_index(count)]);
}

struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
{
	struct intel_context *ce;

	/*
	 * We randomly distribute contexts across the engines upon construction,
	 * as they all share the same pinned vm, and so in order to allow
	 * multiple blits to run in parallel, we must construct each blit
	 * to use a different range of the vm for its GTT. This has to be
	 * known at construction, so we cannot use the late greedy load
	 * balancing of the virtual-engine.
	 */
	ce = __migrate_engines(m->context->engine->gt);
	if (IS_ERR(ce))
		return ce;

	ce->ring = NULL;
	ce->ring_size = SZ_256K;

	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(m->context->vm);

	return ce;
}

static inline struct sgt_dma sg_sgt(struct scatterlist *sg)
{
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}

static int emit_no_arbitration(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Explicitly disable preemption for this request. */
	*cs++ = MI_ARB_ON_OFF;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

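/*
 * Clamp a PTE packet to what the ring can hold right now. The "+ 5"
 * appears to account for the I915_EMIT_PTE_NUM_DWORDS (6) dwords the
 * caller has already reserved via intel_ring_begin(), less the one
 * dword kept back for the terminating MI_NOOP.
 */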
static int max_pte_pkt_size(struct i915_request *rq, int pkt)
{
	struct intel_ring *ring = rq->ring;

	pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
	pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);

	return pkt;
}

#define I915_EMIT_PTE_NUM_DWORDS 6

static int emit_pte(struct i915_request *rq,
		    struct sgt_dma *it,
		    unsigned int pat_index,
		    bool is_lmem,
		    u64 offset,
		    int length)
{
	bool has_64K_pages = HAS_64K_PAGES(rq->i915);
	const u64 encode = rq->context->vm->pte_encode(0, pat_index,
						       is_lmem ? PTE_LM : 0);
	struct intel_ring *ring = rq->ring;
	int pkt, dword_length;
	u32 total = 0;
	u32 page_size;
	u32 *hdr, *cs;

	GEM_BUG_ON(GRAPHICS_VER(rq->i915) < 8);

	page_size = I915_GTT_PAGE_SIZE;
	dword_length = 0x400;

	/* Compute the page directory offset for the target address range */
	if (has_64K_pages) {
		GEM_BUG_ON(!IS_ALIGNED(offset, SZ_2M));

		offset /= SZ_2M;
		offset *= SZ_64K;
		offset += 3 * CHUNK_SZ;

		if (is_lmem) {
			page_size = I915_GTT_PAGE_SIZE_64K;
			dword_length = 0x40;
		}
	} else {
		offset >>= 12;
		offset *= sizeof(u64);
		offset += 2 * CHUNK_SZ;
	}

	offset += (u64)rq->engine->instance << 32;

	cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Pack as many PTE updates as possible into a single MI command */
	pkt = max_pte_pkt_size(rq, dword_length);

	hdr = cs;
	*cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	do {
		if (cs - hdr >= pkt) {
			int dword_rem;

			*hdr += cs - hdr - 2;
			*cs++ = MI_NOOP;

			ring->emit = (void *)cs - ring->vaddr;
			intel_ring_advance(rq, cs);
			intel_ring_update_space(ring);

			cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
			if (IS_ERR(cs))
				return PTR_ERR(cs);

			dword_rem = dword_length;
			if (has_64K_pages) {
				if (IS_ALIGNED(total, SZ_2M)) {
					offset = round_up(offset, SZ_64K);
				} else {
					dword_rem = SZ_2M - (total & (SZ_2M - 1));
					dword_rem /= page_size;
					dword_rem *= 2;
				}
			}

			pkt = max_pte_pkt_size(rq, dword_rem);

			hdr = cs;
			*cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
			*cs++ = lower_32_bits(offset);
			*cs++ = upper_32_bits(offset);
		}

		GEM_BUG_ON(!IS_ALIGNED(it->dma, page_size));

		*cs++ = lower_32_bits(encode | it->dma);
		*cs++ = upper_32_bits(encode | it->dma);

		offset += 8;
		total += page_size;

		it->dma += page_size;
		if (it->dma >= it->max) {
			it->sg = __sg_next(it->sg);
			if (!it->sg || sg_dma_len(it->sg) == 0)
				break;

			it->dma = sg_dma_address(it->sg);
			it->max = it->dma + sg_dma_len(it->sg);
		}
	} while (total < length);

	*hdr += cs - hdr - 2;
	*cs++ = MI_NOOP;

	ring->emit = (void *)cs - ring->vaddr;
	intel_ring_advance(rq, cs);
	intel_ring_update_space(ring);

	return total;
}

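/*
 * Wa_1209644611: on graphics version 11, copies whose height in pages
 * is 3 or 7 (i.e. height % 4 == 3 && height <= 8) must avoid the fast
 * copy blit; emit_copy() falls back to XY_SRC_COPY_BLT_CMD for them.
 */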
static bool wa_1209644611_applies(int ver, u32 size)
{
	u32 height = size >> PAGE_SHIFT;

	if (ver != 11)
		return false;

	return height % 4 == 3 && height <= 8;
}

/**
 * DOC: Flat-CCS - Memory compression for Local memory
 *
 * On Xe-HP and later devices, we use dedicated compression control state (CCS)
 * stored in local memory for each surface, to support the 3D and media
 * compression formats.
 *
 * The memory required for the CCS of the entire local memory is 1/256 of the
 * local memory size. So before the kernel boots, the required memory is
 * reserved for the CCS data and a secure register is programmed with the CCS
 * base address.
 *
 * Flat CCS data needs to be cleared when a lmem object is allocated.
 * And CCS data can be copied in and out of the CCS region through
 * XY_CTRL_SURF_COPY_BLT. The CPU can't access the CCS data directly.
 *
 * i915 supports Flat-CCS on lmem-only objects. When an object has smem in
 * its preference list, on memory pressure, i915 needs to migrate the lmem
 * content into smem. If the lmem object is Flat-CCS compressed by userspace,
 * then i915 needs to decompress it. But i915 lacks the required information
 * for such decompression. Hence i915 supports Flat-CCS only on lmem-only
 * objects.
 *
 * When we exhaust the lmem, Flat-CCS capable objects' lmem backing memory can
 * be temporarily evicted to smem, along with the auxiliary CCS state, where
 * it can potentially be swapped-out at a later point, if required.
 * If userspace later touches the evicted pages, then we always move
 * the backing memory back to lmem, which includes restoring the saved CCS
 * state, and potentially performing any required swap-in.
 *
 * For the migration of lmem objects with smem in the placement list, such as
 * {lmem, smem}, objects are treated as non-Flat-CCS-capable objects.
 */

static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
{
	*cmd++ = MI_FLUSH_DW | flags;
	*cmd++ = 0;
	*cmd++ = 0;

	return cmd;
}

static int emit_copy_ccs(struct i915_request *rq,
			 u32 dst_offset, u8 dst_access,
			 u32 src_offset, u8 src_access, int size)
{
	struct drm_i915_private *i915 = rq->i915;
	int mocs = rq->engine->gt->mocs.uc_index << 1;
	u32 num_ccs_blks;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
				    NUM_CCS_BYTES_PER_BLOCK);
	GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
	cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);

	/*
	 * The XY_CTRL_SURF_COPY_BLT instruction is used to copy the CCS
	 * data in and out of the CCS region.
	 *
	 * We can copy at most 1024 blocks of 256 bytes using one
	 * XY_CTRL_SURF_COPY_BLT instruction.
	 *
	 * In case we need to copy more than 1024 blocks, we need to add
	 * another instruction to the same batch buffer.
	 *
	 * 1024 blocks of 256 bytes of CCS represent a total of 256KB of CCS.
	 *
	 * 256 KB of CCS represents 256 * 256 KB = 64 MB of LMEM.
	 */
	*cs++ = XY_CTRL_SURF_COPY_BLT |
		src_access << SRC_ACCESS_TYPE_SHIFT |
		dst_access << DST_ACCESS_TYPE_SHIFT |
		((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT;
	*cs++ = src_offset;
	*cs++ = rq->engine->instance |
		FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
	*cs++ = dst_offset;
	*cs++ = rq->engine->instance |
		FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);

	cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int emit_copy(struct i915_request *rq,
		     u32 dst_offset, u32 src_offset, int size)
{
	const int ver = GRAPHICS_VER(rq->i915);
	u32 instance = rq->engine->instance;
	u32 *cs;

	cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (ver >= 9 && !wa_1209644611_applies(ver, size)) {
		*cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
		*cs++ = BLT_DEPTH_32 | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = dst_offset;
		*cs++ = instance;
		*cs++ = 0;
		*cs++ = PAGE_SIZE;
		*cs++ = src_offset;
		*cs++ = instance;
	} else if (ver >= 8) {
		*cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = dst_offset;
		*cs++ = instance;
		*cs++ = 0;
		*cs++ = PAGE_SIZE;
		*cs++ = src_offset;
		*cs++ = instance;
	} else {
		GEM_BUG_ON(instance);
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
		*cs++ = dst_offset;
		*cs++ = PAGE_SIZE;
		*cs++ = src_offset;
	}

	intel_ring_advance(rq, cs);
	return 0;
}

static u64 scatter_list_length(struct scatterlist *sg)
{
	u64 len = 0;

	while (sg && sg_dma_len(sg)) {
		len += sg_dma_len(sg);
		sg = sg_next(sg);
	}

	return len;
}

static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
		   u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{
	if (ccs_bytes_to_cpy && !src_is_lmem)
		/*
		 * When CHUNK_SZ is passed all the pages up to CHUNK_SZ
		 * will be taken for the blt. On Flat-CCS capable
		 * platforms the smem object will have more pages than
		 * required for main memory, hence limit it to the
		 * required size for main memory.
		 */
		return min_t(u64, bytes_to_cpy, CHUNK_SZ);
	else
		return CHUNK_SZ;
}

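/*
 * Advance the CCS iterator past the main-memory payload: for a smem
 * object backing a Flat-CCS capable lmem object, the first bytes_to_cpy
 * bytes of the sg list hold the main memory contents and the CCS pages
 * follow, so skip ahead to where the CCS data starts.
 */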
static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{
	u64 len;

	do {
		GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
		len = it->max - it->dma;
		if (len > bytes_to_cpy) {
			it->dma += bytes_to_cpy;
			break;
		}

		bytes_to_cpy -= len;

		it->sg = __sg_next(it->sg);
		it->dma = sg_dma_address(it->sg);
		it->max = it->dma + sg_dma_len(it->sg);
	} while (bytes_to_cpy);
}

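/*
 * The copy is split into CHUNK_SZ chunks, each emitted as its own
 * request: the PTE updates for the src/dst windows, an invalidating
 * flush and the blit itself run under MI_ARB_ON_OFF so each chunk is
 * atomic, while arbitration between requests keeps preemption latency
 * bounded. *out returns a reference to the last request emitted.
 */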
int
intel_context_migrate_copy(struct intel_context *ce,
			   const struct i915_deps *deps,
			   struct scatterlist *src,
			   unsigned int src_pat_index,
			   bool src_is_lmem,
			   struct scatterlist *dst,
			   unsigned int dst_pat_index,
			   bool dst_is_lmem,
			   struct i915_request **out)
{
	struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
	struct drm_i915_private *i915 = ce->engine->i915;
	u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
	unsigned int ccs_pat_index;
	u32 src_offset, dst_offset;
	u8 src_access, dst_access;
	struct i915_request *rq;
	u64 src_sz, dst_sz;
	bool ccs_is_src, overwrite_ccs;
	int err;

	GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
	GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
	*out = NULL;

	GEM_BUG_ON(ce->ring->size < SZ_64K);

	src_sz = scatter_list_length(src);
	bytes_to_cpy = src_sz;

	if (HAS_FLAT_CCS(i915) && src_is_lmem ^ dst_is_lmem) {
		src_access = !src_is_lmem && dst_is_lmem;
		dst_access = !src_access;

		dst_sz = scatter_list_length(dst);
		if (src_is_lmem) {
			it_ccs = it_dst;
			ccs_pat_index = dst_pat_index;
			ccs_is_src = false;
		} else if (dst_is_lmem) {
			bytes_to_cpy = dst_sz;
			it_ccs = it_src;
			ccs_pat_index = src_pat_index;
			ccs_is_src = true;
		}

		/*
		 * When an eviction of the CCS is needed, smem will have the
		 * extra pages for the CCS data.
		 *
		 * TO-DO: Want to move the size mismatch check to a WARN_ON,
		 * but still we have some requests of smem->lmem with same size.
		 * Need to fix it.
		 */
		ccs_bytes_to_cpy = src_sz != dst_sz ? GET_CCS_BYTES(i915, bytes_to_cpy) : 0;
		if (ccs_bytes_to_cpy)
			get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
	}

	overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;

	src_offset = 0;
	dst_offset = CHUNK_SZ;
	if (HAS_64K_PAGES(ce->engine->i915)) {
		src_offset = 0;
		dst_offset = 0;
		if (src_is_lmem)
			src_offset = CHUNK_SZ;
		if (dst_is_lmem)
			dst_offset = 2 * CHUNK_SZ;
	}

	do {
		int len;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		if (deps) {
			err = i915_request_await_deps(rq, deps);
			if (err)
				goto out_rq;

			if (rq->engine->emit_init_breadcrumb) {
				err = rq->engine->emit_init_breadcrumb(rq);
				if (err)
					goto out_rq;
			}

			deps = NULL;
		}

		/* The PTE updates + copy must not be interrupted. */
		err = emit_no_arbitration(rq);
		if (err)
			goto out_rq;

		src_sz = calculate_chunk_sz(i915, src_is_lmem,
					    bytes_to_cpy, ccs_bytes_to_cpy);

		len = emit_pte(rq, &it_src, src_pat_index, src_is_lmem,
			       src_offset, src_sz);
		if (!len) {
			err = -EINVAL;
			goto out_rq;
		}
		if (len < 0) {
			err = len;
			goto out_rq;
		}

		err = emit_pte(rq, &it_dst, dst_pat_index, dst_is_lmem,
			       dst_offset, len);
		if (err < 0)
			goto out_rq;
		if (err < len) {
			err = -EINVAL;
			goto out_rq;
		}

		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			goto out_rq;

		err = emit_copy(rq, dst_offset, src_offset, len);
		if (err)
			goto out_rq;

		bytes_to_cpy -= len;

		if (ccs_bytes_to_cpy) {
			int ccs_sz;

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;

			ccs_sz = GET_CCS_BYTES(i915, len);
			err = emit_pte(rq, &it_ccs, ccs_pat_index, false,
				       ccs_is_src ? src_offset : dst_offset,
				       ccs_sz);
			if (err < 0)
				goto out_rq;
			if (err < ccs_sz) {
				err = -EINVAL;
				goto out_rq;
			}

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;

			err = emit_copy_ccs(rq, dst_offset, dst_access,
					    src_offset, src_access, len);
			if (err)
				goto out_rq;

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;
			ccs_bytes_to_cpy -= ccs_sz;
		} else if (overwrite_ccs) {
			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;

			if (src_is_lmem) {
				/*
				 * If the src is already in lmem, then we must
				 * be doing an lmem -> lmem transfer, and so
				 * should be safe to directly copy the CCS
				 * state. In this case we have either
				 * initialised the CCS aux state when first
				 * clearing the pages (since it is already
				 * allocated in lmem), or the user has
				 * potentially populated it, in which case we
				 * need to copy the CCS state as-is.
				 */
				err = emit_copy_ccs(rq,
						    dst_offset, INDIRECT_ACCESS,
						    src_offset, INDIRECT_ACCESS,
						    len);
			} else {
				/*
				 * While we can't always restore/manage the CCS
				 * state, we still need to ensure we don't leak
				 * the CCS state from the previous user, so make
				 * sure we overwrite it with something.
				 */
				err = emit_copy_ccs(rq,
						    dst_offset, INDIRECT_ACCESS,
						    dst_offset, DIRECT_ACCESS,
						    len);
			}

			if (err)
				goto out_rq;

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;
		}

		/* Arbitration is re-enabled between requests. */
out_rq:
		if (*out)
			i915_request_put(*out);
		*out = i915_request_get(rq);
		i915_request_add(rq);

		if (err)
			break;

		if (!bytes_to_cpy && !ccs_bytes_to_cpy) {
			if (src_is_lmem)
				WARN_ON(it_src.sg && sg_dma_len(it_src.sg));
			else
				WARN_ON(it_dst.sg && sg_dma_len(it_dst.sg));
			break;
		}

		if (WARN_ON(!it_src.sg || !sg_dma_len(it_src.sg) ||
			    !it_dst.sg || !sg_dma_len(it_dst.sg) ||
			    (ccs_bytes_to_cpy && (!it_ccs.sg ||
						  !sg_dma_len(it_ccs.sg))))) {
			err = -EINVAL;
			break;
		}

		cond_resched();
	} while (1);

out_ce:
	return err;
}

static int emit_clear(struct i915_request *rq, u32 offset, int size,
		      u32 value, bool is_lmem)
{
	struct drm_i915_private *i915 = rq->i915;
	int mocs = rq->engine->gt->mocs.uc_index << 1;
	const int ver = GRAPHICS_VER(i915);
	int ring_sz;
	u32 *cs;

	GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		ring_sz = XY_FAST_COLOR_BLT_DW;
	else if (ver >= 8)
		ring_sz = 8;
	else
		ring_sz = 6;

	cs = intel_ring_begin(rq, ring_sz);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
			(XY_FAST_COLOR_BLT_DW - 2);
		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
			(PAGE_SIZE - 1);
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = offset;
		*cs++ = rq->engine->instance;
		*cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
		/* BG7 */
		*cs++ = value;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
		/* BG11 */
		*cs++ = 0;
		*cs++ = 0;
		/* BG13 */
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
	} else if (ver >= 8) {
		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = offset;
		*cs++ = rq->engine->instance;
		*cs++ = value;
		*cs++ = MI_NOOP;
	} else {
		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = offset;
		*cs++ = value;
	}

	intel_ring_advance(rq, cs);
	return 0;
}

int
intel_context_migrate_clear(struct intel_context *ce,
			    const struct i915_deps *deps,
			    struct scatterlist *sg,
			    unsigned int pat_index,
			    bool is_lmem,
			    u32 value,
			    struct i915_request **out)
{
	struct drm_i915_private *i915 = ce->engine->i915;
	struct sgt_dma it = sg_sgt(sg);
	struct i915_request *rq;
	u32 offset;
	int err;

	GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
	*out = NULL;

	GEM_BUG_ON(ce->ring->size < SZ_64K);

	offset = 0;
	if (HAS_64K_PAGES(i915) && is_lmem)
		offset = CHUNK_SZ;

	do {
		int len;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		if (deps) {
			err = i915_request_await_deps(rq, deps);
			if (err)
				goto out_rq;

			if (rq->engine->emit_init_breadcrumb) {
				err = rq->engine->emit_init_breadcrumb(rq);
				if (err)
					goto out_rq;
			}

			deps = NULL;
		}

		/* The PTE updates + clear must not be interrupted. */
		err = emit_no_arbitration(rq);
		if (err)
			goto out_rq;

		len = emit_pte(rq, &it, pat_index, is_lmem, offset, CHUNK_SZ);
		if (len <= 0) {
			err = len;
			goto out_rq;
		}

		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			goto out_rq;

		err = emit_clear(rq, offset, len, value, is_lmem);
		if (err)
			goto out_rq;

		if (HAS_FLAT_CCS(i915) && is_lmem && !value) {
			/*
			 * Copy the content of the memory into the
			 * corresponding CCS surface.
			 */
			err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset,
					    DIRECT_ACCESS, len);
			if (err)
				goto out_rq;
		}

		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);

		/* Arbitration is re-enabled between requests. */
out_rq:
		if (*out)
			i915_request_put(*out);
		*out = i915_request_get(rq);
		i915_request_add(rq);
		if (err || !it.sg || !sg_dma_len(it.sg))
			break;

		cond_resched();
	} while (1);

out_ce:
	return err;
}

int intel_migrate_copy(struct intel_migrate *m,
		       struct i915_gem_ww_ctx *ww,
		       const struct i915_deps *deps,
		       struct scatterlist *src,
		       unsigned int src_pat_index,
		       bool src_is_lmem,
		       struct scatterlist *dst,
		       unsigned int dst_pat_index,
		       bool dst_is_lmem,
		       struct i915_request **out)
{
	struct intel_context *ce;
	int err;

	*out = NULL;
	if (!m->context)
		return -ENODEV;

	ce = intel_migrate_create_context(m);
	if (IS_ERR(ce))
		ce = intel_context_get(m->context);
	GEM_BUG_ON(IS_ERR(ce));

	err = intel_context_pin_ww(ce, ww);
	if (err)
		goto out;

	err = intel_context_migrate_copy(ce, deps,
					 src, src_pat_index, src_is_lmem,
					 dst, dst_pat_index, dst_is_lmem,
					 out);

	intel_context_unpin(ce);
out:
	intel_context_put(ce);
	return err;
}

int
intel_migrate_clear(struct intel_migrate *m,
		    struct i915_gem_ww_ctx *ww,
		    const struct i915_deps *deps,
		    struct scatterlist *sg,
		    unsigned int pat_index,
		    bool is_lmem,
		    u32 value,
		    struct i915_request **out)
{
	struct intel_context *ce;
	int err;

	*out = NULL;
	if (!m->context)
		return -ENODEV;

	ce = intel_migrate_create_context(m);
	if (IS_ERR(ce))
		ce = intel_context_get(m->context);
	GEM_BUG_ON(IS_ERR(ce));

	err = intel_context_pin_ww(ce, ww);
	if (err)
		goto out;

	err = intel_context_migrate_clear(ce, deps, sg, pat_index,
					  is_lmem, value, out);

	intel_context_unpin(ce);
out:
	intel_context_put(ce);
	return err;
}

void intel_migrate_fini(struct intel_migrate *m)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&m->context);
	if (!ce)
		return;

	intel_engine_destroy_pinned_context(ce);
}
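
/*
 * Usage sketch (not part of the driver): a minimal caller, assuming a
 * valid gt and an already dma-mapped scatterlist; error handling and
 * the pat_index lookup are elided.
 *
 *	struct intel_migrate m;
 *	struct i915_gem_ww_ctx ww;
 *	struct i915_request *rq = NULL;
 *	int err;
 *
 *	err = intel_migrate_init(&m, gt);
 *	for_i915_gem_ww(&ww, err, true)
 *		err = intel_migrate_clear(&m, &ww, NULL, sg, pat_index,
 *					  is_lmem, 0, &rq);
 *	if (rq) {
 *		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
 *		i915_request_put(rq);
 *	}
 *	intel_migrate_fini(&m);
 */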

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_migrate.c"
#endif