Lines matching refs: batch
(cross-reference of the "batch" identifier in the i915 execbuffer code; the leading number on each hit is the line in the source file, and "in foo()" names the enclosing function)

204 	struct i915_vma *batch; /** identity of the batch obj/vma */  member
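For orientation, the member hit at line 204 sits in the execbuffer bookkeeping structure. A trimmed, not-verbatim sketch of just the fields this listing touches, assuming the enclosing struct is the driver's struct i915_execbuffer (the real struct carries many more fields, and kernel types such as struct i915_vma come from the i915 headers):

    struct i915_execbuffer {
            struct i915_vma **vma;          /* object/vma lookup table, eb->vma[i] */
            unsigned int *flags;            /* per-object EXEC_OBJECT_* flags */

            struct i915_vma *batch;         /* identity of the batch obj/vma (line 204) */
            u32 batch_start_offset;         /* first command byte within the batch */
            u32 batch_len;                  /* length of the command stream, 0 = to the end */
            /* ... remaining execbuffer state elided ... */
    };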
749 eb->batch = eb->vma[i]; in eb_lookup_vmas()
750 GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]); in eb_lookup_vmas()
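The hits at 749-750 record which slot of the lookup table is the batch, before the arrays may be reordered. A hedged sketch of how that slot is chosen, assuming this era's helper name eb_batch_index and the eb->args/eb->buffer_count fields:

    static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
    {
            /* I915_EXEC_BATCH_FIRST places the batch at slot 0;
             * by default userspace puts it last in the exec list.
             */
            return eb->args->flags & I915_EXEC_BATCH_FIRST ? 0 : eb->buffer_count - 1;
    }

    /* ... in eb_lookup_vmas(), once every handle has a vma ... */
            i = eb_batch_index(eb);
            eb->batch = eb->vma[i];                                  /* line 749 */
            GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);      /* line 750 */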
880 GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32)); in reloc_gpu_flush()
882 i915_gem_object_unpin_map(cache->rq->batch->obj); in reloc_gpu_flush()
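Lines 880-882 are the teardown of the GPU relocation path: once the staging commands are written, the buffer is terminated and its CPU mapping dropped before the request goes to the hardware. A not-verbatim sketch along those lines, assuming the reloc_cache fields rq_cmd/rq_size; the request-submission helper name varies between kernel versions:

    static void reloc_gpu_flush(struct reloc_cache *cache)
    {
            /* There must still be room for the terminator (line 880). */
            GEM_BUG_ON(cache->rq_size >=
                       cache->rq->batch->obj->base.size / sizeof(u32));
            cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;

            i915_gem_object_unpin_map(cache->rq->batch->obj);        /* line 882 */

            /* Submit the relocation request; helper name is an
             * assumption for this kernel era.
             */
            __i915_request_add(cache->rq, true);
            cache->rq = NULL;
    }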
1070 struct i915_vma *batch; in __reloc_gpu_alloc() local
1092 batch = i915_vma_instance(obj, vma->vm, NULL); in __reloc_gpu_alloc()
1093 if (IS_ERR(batch)) { in __reloc_gpu_alloc()
1094 err = PTR_ERR(batch); in __reloc_gpu_alloc()
1098 err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK); in __reloc_gpu_alloc()
1121 batch->node.start, PAGE_SIZE, in __reloc_gpu_alloc()
1126 GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); in __reloc_gpu_alloc()
1127 i915_vma_move_to_active(batch, rq, 0); in __reloc_gpu_alloc()
1128 reservation_object_lock(batch->resv, NULL); in __reloc_gpu_alloc()
1129 reservation_object_add_excl_fence(batch->resv, &rq->fence); in __reloc_gpu_alloc()
1130 reservation_object_unlock(batch->resv); in __reloc_gpu_alloc()
1131 i915_vma_unpin(batch); in __reloc_gpu_alloc()
1138 rq->batch = batch; in __reloc_gpu_alloc()
1150 i915_vma_unpin(batch); in __reloc_gpu_alloc()
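The cluster at 1070-1150 builds that staging batch: a throwaway object is created and CPU-mapped, bound into the target's address space, executed via a fresh request, and remembered as rq->batch so the flush above can find it. A condensed, not-verbatim sketch of the flow; the allocation helpers (i915_gem_object_create_internal, i915_request_alloc), the dispatch flags, and the reloc_cache bookkeeping are assumptions for this kernel era, and error unwinding is simplified:

    static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                                 struct i915_vma *vma,
                                 unsigned int len)
    {
            struct reloc_cache *cache = &eb->reloc_cache;
            struct drm_i915_gem_object *obj;
            struct i915_request *rq;
            struct i915_vma *batch;                                   /* line 1070 */
            u32 *cmd;
            int err;

            /* One page of staging commands, CPU-mapped for filling. */
            obj = i915_gem_object_create_internal(eb->i915, PAGE_SIZE);
            if (IS_ERR(obj))
                    return PTR_ERR(obj);

            cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
            if (IS_ERR(cmd)) {
                    err = PTR_ERR(cmd);
                    goto err_obj;
            }

            batch = i915_vma_instance(obj, vma->vm, NULL);            /* line 1092 */
            if (IS_ERR(batch)) {
                    err = PTR_ERR(batch);                             /* line 1094 */
                    goto err_unmap;
            }

            err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK); /* line 1098 */
            if (err)
                    goto err_unmap;

            rq = i915_request_alloc(eb->engine, eb->ctx);
            if (IS_ERR(rq)) {
                    err = PTR_ERR(rq);
                    goto err_unpin;
            }

            /* Chain the ring to the staging batch (line 1121);
             * dispatch flags elided here.
             */
            err = eb->engine->emit_bb_start(rq, batch->node.start, PAGE_SIZE, 0);
            if (err)
                    goto err_request;

            /* The freshly created object must be idle (line 1126). */
            GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
            i915_vma_move_to_active(batch, rq, 0);                    /* line 1127 */

            reservation_object_lock(batch->resv, NULL);               /* lines 1128-1130 */
            reservation_object_add_excl_fence(batch->resv, &rq->fence);
            reservation_object_unlock(batch->resv);
            i915_vma_unpin(batch);                                    /* line 1131 */

            rq->batch = batch;                                        /* line 1138 */

            /* reloc_gpu_flush() later unmaps the object and submits the request. */
            cache->rq = rq;
            cache->rq_cmd = cmd;
            cache->rq_size = 0;
            return 0;

    err_request:
            i915_request_add(rq);
    err_unpin:
            i915_vma_unpin(batch);                                    /* line 1150 */
    err_unmap:
            i915_gem_object_unpin_map(obj);
    err_obj:
            i915_gem_object_put(obj);
            return err;
    }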
1203 u32 *batch; in relocate_entry() local
1213 batch = reloc_gpu(eb, vma, len); in relocate_entry()
1214 if (IS_ERR(batch)) in relocate_entry()
1220 *batch++ = MI_STORE_DWORD_IMM_GEN4; in relocate_entry()
1221 *batch++ = lower_32_bits(addr); in relocate_entry()
1222 *batch++ = upper_32_bits(addr); in relocate_entry()
1223 *batch++ = lower_32_bits(target_offset); in relocate_entry()
1227 *batch++ = MI_STORE_DWORD_IMM_GEN4; in relocate_entry()
1228 *batch++ = lower_32_bits(addr); in relocate_entry()
1229 *batch++ = upper_32_bits(addr); in relocate_entry()
1230 *batch++ = upper_32_bits(target_offset); in relocate_entry()
1232 *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1; in relocate_entry()
1233 *batch++ = lower_32_bits(addr); in relocate_entry()
1234 *batch++ = upper_32_bits(addr); in relocate_entry()
1235 *batch++ = lower_32_bits(target_offset); in relocate_entry()
1236 *batch++ = upper_32_bits(target_offset); in relocate_entry()
1239 *batch++ = MI_STORE_DWORD_IMM_GEN4; in relocate_entry()
1240 *batch++ = 0; in relocate_entry()
1241 *batch++ = addr; in relocate_entry()
1242 *batch++ = target_offset; in relocate_entry()
1244 *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in relocate_entry()
1245 *batch++ = 0; in relocate_entry()
1246 *batch++ = addr; in relocate_entry()
1247 *batch++ = target_offset; in relocate_entry()
1249 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; in relocate_entry()
1250 *batch++ = addr; in relocate_entry()
1251 *batch++ = target_offset; in relocate_entry()
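The run of hits from 1203 to 1251 is the heart of relocate_entry(): when a relocation has to be performed by the GPU, the matching MI_STORE_DWORD_IMM variant is written into the staging batch depending on generation and on whether 64-bit ("wide") relocations are in use. A not-verbatim sketch of that emission, wrapped here as a standalone helper (relocate_entry_gpu) purely for illustration; the reloc_cache gen/use_64bit_reloc fields and the surrounding control flow are assumptions:

    static int relocate_entry_gpu(struct i915_execbuffer *eb,
                                  struct i915_vma *vma,
                                  u64 offset, u64 target_offset)
    {
            const unsigned int gen = eb->reloc_cache.gen;
            const bool wide = eb->reloc_cache.use_64bit_reloc;
            unsigned int len;
            u32 *batch;                                               /* line 1203 */
            u64 addr;

            /* Reserve the right number of dwords in the staging batch. */
            if (wide)
                    len = offset & 7 ? 8 : 5;
            else if (gen >= 4)
                    len = 4;
            else
                    len = 3;

            batch = reloc_gpu(eb, vma, len);                          /* line 1213 */
            if (IS_ERR(batch))
                    return PTR_ERR(batch);                            /* line 1214 */

            addr = gen8_canonical_addr(vma->node.start + offset);
            if (wide) {
                    if (offset & 7) {
                            /* Unaligned qword: two dword stores (1220-1230). */
                            *batch++ = MI_STORE_DWORD_IMM_GEN4;
                            *batch++ = lower_32_bits(addr);
                            *batch++ = upper_32_bits(addr);
                            *batch++ = lower_32_bits(target_offset);

                            addr = gen8_canonical_addr(addr + 4);

                            *batch++ = MI_STORE_DWORD_IMM_GEN4;
                            *batch++ = lower_32_bits(addr);
                            *batch++ = upper_32_bits(addr);
                            *batch++ = upper_32_bits(target_offset);
                    } else {
                            /* Aligned qword: one 64-bit store (1232-1236). */
                            *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
                            *batch++ = lower_32_bits(addr);
                            *batch++ = upper_32_bits(addr);
                            *batch++ = lower_32_bits(target_offset);
                            *batch++ = upper_32_bits(target_offset);
                    }
            } else if (gen >= 6) {
                    /* gen6/7 (1239-1242). */
                    *batch++ = MI_STORE_DWORD_IMM_GEN4;
                    *batch++ = 0;
                    *batch++ = addr;
                    *batch++ = target_offset;
            } else if (gen >= 4) {
                    /* gen4/5: store through the global GTT (1244-1247). */
                    *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                    *batch++ = 0;
                    *batch++ = addr;
                    *batch++ = target_offset;
            } else {
                    /* older gens (1249-1251). */
                    *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                    *batch++ = addr;
                    *batch++ = target_offset;
            }

            return 0;
    }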
1668 GEM_BUG_ON(!eb->batch); in eb_relocate_slow()
1927 eb->batch->obj, in eb_parse()
1981 eb->batch->node.start + in eb_submit()
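Line 1981 is where the batch finally reaches the ring: eb_submit() hands the engine the GPU address of the batch binding plus the user's start offset. A small fragment sketching that call, assuming the batch_len/batch_flags fields of this era:

    /* ... in eb_submit(), after relocations and the request are set up ... */
            err = eb->engine->emit_bb_start(eb->request,
                                            eb->batch->node.start +   /* line 1981 */
                                            eb->batch_start_offset,
                                            eb->batch_len,
                                            eb->batch_flags);
            if (err)
                    return err;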
2303 if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) { in i915_gem_do_execbuffer()
2308 if (eb.batch_start_offset > eb.batch->size || in i915_gem_do_execbuffer()
2309 eb.batch_len > eb.batch->size - eb.batch_start_offset) { in i915_gem_do_execbuffer()
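The hits at 2303-2309 are sanity checks on the user-supplied batch before anything is submitted: a batch flagged writable is rejected, as is a start offset or length that does not fit inside the batch object. A sketch of those checks (the debug strings and the err_vma label are illustrative, not quoted):

            if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {   /* line 2303 */
                    DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                    err = -EINVAL;
                    goto err_vma;
            }

            if (eb.batch_start_offset > eb.batch->size ||                /* line 2308 */
                eb.batch_len > eb.batch->size - eb.batch_start_offset) { /* line 2309 */
                    DRM_DEBUG("Attempting to use out-of-bounds batch\n");
                    err = -EINVAL;
                    goto err_vma;
            }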
2336 eb.batch = vma; in i915_gem_do_execbuffer()
2341 eb.batch_len = eb.batch->size - eb.batch_start_offset; in i915_gem_do_execbuffer()
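Lines 2336 and 2341 follow the command-parser step seen at line 1927: if the parser produced a validated shadow copy, that copy replaces eb.batch, and a zero batch_len is expanded to "the rest of the object". A hedged sketch, assuming the eb_use_cmdparser()/eb_parse() helpers and the I915_DISPATCH_SECURE promotion of this era:

            if (eb_use_cmdparser(&eb)) {
                    struct i915_vma *vma;

                    vma = eb_parse(&eb, drm_is_current_master(file));
                    if (IS_ERR(vma)) {
                            err = PTR_ERR(vma);
                            goto err_vma;
                    }

                    if (vma) {
                            /* Batch parsed and accepted: execute the shadow
                             * copy from its start, dispatched as secure.
                             */
                            eb.batch_flags |= I915_DISPATCH_SECURE;
                            eb.batch_start_offset = 0;
                            eb.batch = vma;                              /* line 2336 */
                    }
            }

            if (eb.batch_len == 0)
                    eb.batch_len = eb.batch->size - eb.batch_start_offset; /* line 2341 */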
2360 vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0); in i915_gem_do_execbuffer()
2366 eb.batch = vma; in i915_gem_do_execbuffer()
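Lines 2360-2366 are the secure-dispatch path: a batch executed with I915_DISPATCH_SECURE must be reachable through the global GTT, so the batch vma is swapped for a GGTT binding that is dropped again after submission (the unpin at line 2430). A sketch of that swap, assuming the batch_flags gate:

            if (eb.batch_flags & I915_DISPATCH_SECURE) {
                    struct i915_vma *vma;

                    vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0); /* line 2360 */
                    if (IS_ERR(vma)) {
                            err = PTR_ERR(vma);
                            goto err_vma;
                    }

                    eb.batch = vma;                                       /* line 2366 */
            }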
2406 eb.request->batch = eb.batch; in i915_gem_do_execbuffer()
2430 i915_vma_unpin(eb.batch); in i915_gem_do_execbuffer()