/* xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_virt.c (revision e9b266a0d797ad5239c6b06d300df1acd89edd40) */

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

#include <drm/drm_drv.h>
#include <xen/xen.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)
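
/*
 * Illustrative expansion of the macro above (not generated code), e.g.
 * POPULATE_UCODE_INFO(info, AMD_SRIOV_UCODE_ID_SMC, ver) becomes:
 *
 *	info->ucode_info[AMD_SRIOV_UCODE_ID_SMC].id = AMD_SRIOV_UCODE_ID_SMC;
 *	info->ucode_info[AMD_SRIOV_UCODE_ID_SMC].version = ver;
 */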

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor. Choose SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
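
/*
 * Usage sketch (hypothetical caller, not taken from this file): a VF
 * driver can probe for host-side MMIO blocking before touching registers.
 *
 *	if (amdgpu_sriov_vf(adev) && amdgpu_virt_mmio_blocked(adev)) {
 *		dev_err(adev->dev, "MMIO blocked by hypervisor\n");
 *		return -EACCES;
 *	}
 */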

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);

	/* enable virtual display */
	if (adev->asic_type != CHIP_ALDEBARAN &&
	    adev->asic_type != CHIP_ARCTURUS &&
	    ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
		if (adev->mode_info.num_crtc == 0)
			adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
	}
	ddev->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	/* Reduce kcq number to 2 to reduce latency */
	if (amdgpu_num_kcq == -1)
		amdgpu_num_kcq = 2;
}

void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
					      ref, mask);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		drm_msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}
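
/*
 * Usage sketch (illustrative register names and values, not taken from
 * this file): emit a register write plus a masked poll on a second
 * register as one KIQ submission, e.g. for a request/ack handshake:
 *
 *	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
 *					   1 << vmid, 1 << vmid);
 */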

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting driver init/fini, full gpu access needs to be requested first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r) {
			adev->no_hw_access = true;
			return r;
		}

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When driver init/fini finishes, full gpu access needs to be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
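
/*
 * The request/release pair brackets any init or fini sequence; a minimal
 * sketch of the expected call pattern (error handling elided):
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... program the hardware ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 */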

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

unsigned int amd_sriov_msg_checksum(void *obj,
				unsigned long obj_size,
				unsigned int key,
				unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate the checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* subtract the checksum field itself */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}
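
/*
 * Verification sketch, mirroring the use in amdgpu_virt_read_pf2vf_data()
 * below: recompute the sum over the whole message while passing in the
 * received checksum, so that its own bytes are subtracted back out, then
 * compare ("msg" and "key" are illustrative names, not symbols from this
 * file):
 *
 *	checkval = amd_sriov_msg_checksum(msg, msg->size, key, msg->checksum);
 *	if (checkval != msg->checksum)
 *		return -EINVAL;
 */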

static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* GPU will be marked bad on the host if the bp count is more than 10,
	 * so allocating 512 is enough.
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		goto data_failure;

	bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
	if (!bps)
		goto bps_failure;

	bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
	if (!bps_bo)
		goto bps_bo_failure;

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;

	virt->ras_init_done = true;

	return 0;

bps_bo_failure:
	kfree(bps);
bps_failure:
	kfree(*data);
data_failure:
	return -ENOMEM;
}

static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		amdgpu_bo_free_kernel(&bo, NULL, NULL);
		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}

static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}

static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases of reserve error that should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}
}

static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}

static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
		uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;
	void *vram_usage_va = NULL;

	if (adev->mman.fw_vram_usage_va)
		vram_usage_va = adev->mman.fw_vram_usage_va;
	else
		vram_usage_va = adev->mman.drv_vram_usage_va;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}

static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;

	uint32_t i;
	uint32_t tmp;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		DRM_ERROR("invalid pf2vf message size\n");
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		/* TODO: missing key, need to add it later */
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
		adev->virt.reg_access =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

		adev->virt.decode_max_dimension_pixels = 0;
		adev->virt.decode_max_frame_pixels = 0;
		adev->virt.encode_max_dimension_pixels = 0;
		adev->virt.encode_max_frame_pixels = 0;
		adev->virt.is_mm_bw_enabled = false;
		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
		}
		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
			adev->virt.is_mm_bw_enabled = true;

		adev->unique_id =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
		break;
	default:
		DRM_ERROR("invalid pf2vf version\n");
		return -EINVAL;
	}

	/* correct a too large or too small interval value */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}

static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
			    adev->psp.ras_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
			    adev->psp.xgmi_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
}

static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strlcpy(vf2pf_info->driver_version, "N/A",
		    sizeof(vf2pf_info->driver_version));

	vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	vf2pf_info->fb_usage =
		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
	vf2pf_info->fb_vis_usage =
		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
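	/* the checksum field itself is still zero from the memset above, so
	 * summing the whole struct with key 0 and a zero checksum argument
	 * is correct; the result is stored only afterwards */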
	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
		vf2pf_info, sizeof(*vf2pf_info), 0, 0);

	return 0;
}

static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret)
		goto out;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}

void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
		adev->virt.vf2pf_update_interval_ms = 0;
	}
}

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;

	if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
		DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
	} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		/* go through this logic in ip_init and reset to init the workqueue */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}

void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		if (adev->mman.fw_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		} else if (adev->mman.drv_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		}

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

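			/* rebuild the 64-bit byte offset from its two 32-bit
			 * halves; e.g. low 0x00001000 with high 0x2 yields
			 * 0x200001000 */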
			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}

void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
	case CHIP_IP_DISCOVERY:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		/* passthrough mode excludes sriov mode */
		if (is_virtual_machine() && !xen_initial_domain())
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		/* VF MMIO access (except mailbox range) from CPU
		 * will be blocked during sriov runtime
		 */
		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;

	/* we have the ability to check now */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
			soc15_set_virt_ops(adev);
#ifdef CONFIG_X86
			/* don't send GPU_INIT_DATA with MS_HYPERV */
			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
#endif
				/* send a dummy GPU_INIT_DATA request to the host on vega10 */
				amdgpu_virt_request_init_data(adev);
			break;
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_IP_DISCOVERY:
			nv_set_virt_ops(adev);
			/* try to send a GPU_INIT_DATA request to the host */
			amdgpu_virt_request_init_data(adev);
			break;
		default: /* other chips don't support SRIOV */
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}
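
/*
 * Downstream code keys off the caps set above; a hedged sketch of the
 * typical checks (amdgpu_sriov_vf() is used throughout this file, the
 * passthrough test is illustrative):
 *
 *	if (amdgpu_sriov_vf(adev))
 *		... take the SR-IOV code paths ...
 *	else if (adev->virt.caps & AMDGPU_PASSTHROUGH_MODE)
 *		... whole GPU assigned to the guest ...
 */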

static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev) ? true : false;
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev) ? true : false;
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}
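
/*
 * Sketch of acting on the returned mode (illustrative; the real policy
 * lives in the callers, e.g. power code behaving differently when a
 * single VF owns the GPU):
 *
 *	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
 *	case SRIOV_VF_MODE_ONE_VF:
 *		... full feature set for the lone VF ...
 *		break;
 *	case SRIOV_VF_MODE_MULTI_VF:
 *		... restricted feature set ...
 *		break;
 *	default:
 *		... SRIOV_VF_MODE_BARE_METAL ...
 *		break;
 *	}
 */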

void amdgpu_virt_post_reset(struct amdgpu_device *adev)
{
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {
		/* force set to GFXOFF state after reset,
		 * to avoid some invalid operation before GC enable
		 */
		adev->gfx.is_poweron = false;
	}
}

bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
		/* no vf autoload, white list */
		if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
		    ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
		/* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
		if (ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	case IP_VERSION(13, 0, 10):
		/* white list */
		if (ucode_id == AMDGPU_UCODE_ID_CAP
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
		|| ucode_id == AMDGPU_UCODE_ID_VCN1
		|| ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	default:
		/* legacy black list */
		if (ucode_id == AMDGPU_UCODE_ID_SDMA0
		    || ucode_id == AMDGPU_UCODE_ID_SDMA1
		    || ucode_id == AMDGPU_UCODE_ID_SDMA2
		    || ucode_id == AMDGPU_UCODE_ID_SDMA3
		    || ucode_id == AMDGPU_UCODE_ID_SDMA4
		    || ucode_id == AMDGPU_UCODE_ID_SDMA5
		    || ucode_id == AMDGPU_UCODE_ID_SDMA6
		    || ucode_id == AMDGPU_UCODE_ID_SDMA7
		    || ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	}
}

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
	uint32_t i;

	if (!adev->virt.is_mm_bw_enabled)
		return;

	if (encode) {
		for (i = 0; i < encode_array_size; i++) {
			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
			if (encode[i].max_width > 0)
				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
			else
				encode[i].max_height = 0;
		}
	}

	if (decode) {
		for (i = 0; i < decode_array_size; i++) {
			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
			if (decode[i].max_width > 0)
				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
			else
				decode[i].max_height = 0;
		}
	}
}

static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
						 u32 acc_flags, u32 hwip,
						 bool write, u32 *rlcg_flag)
{
	bool ret = false;

	switch (hwip) {
	case GC_HWIP:
		if (amdgpu_sriov_reg_indirect_gc(adev)) {
			*rlcg_flag =
				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
			ret = true;
		/* only in newer versions are AMDGPU_REGS_NO_KIQ and
		 * AMDGPU_REGS_RLC enabled simultaneously */
		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
				!(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
			ret = true;
		}
		break;
	case MMHUB_HWIP:
		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
		    (acc_flags & AMDGPU_REGS_RLC) && write) {
			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
			ret = true;
		}
		break;
	default:
		break;
	}
	return ret;
}

static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
	uint32_t timeout = 50000;
	uint32_t i, tmp;
	uint32_t ret = 0;
	void *scratch_reg0;
	void *scratch_reg1;
	void *scratch_reg2;
	void *scratch_reg3;
	void *spare_int;

	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
		dev_err(adev->dev,
			"indirect register access through rlcg is not available\n");
		return 0;
	}

	if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
		dev_err(adev->dev, "invalid xcc\n");
		return 0;
	}

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;

	mutex_lock(&adev->virt.rlcg_reg_lock);

	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

	if (offset == reg_access_ctrl->grbm_cntl) {
		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
		writel(v, scratch_reg2);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else if (offset == reg_access_ctrl->grbm_idx) {
		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
		writel(v, scratch_reg3);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		/*
		 * SCRATCH_REG0	= read/write value
		 * SCRATCH_REG1[30:28]	= command
		 * SCRATCH_REG1[19:0]	= address in dword
		 * SCRATCH_REG1[26:24]	= error reporting
		 */
		writel(v, scratch_reg0);
		writel((offset | flag), scratch_reg1);
		if (reg_access_ctrl->spare_int)
			writel(1, spare_int);

		for (i = 0; i < timeout; i++) {
			tmp = readl(scratch_reg1);
			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
				break;
			udelay(10);
		}

		if (i >= timeout) {
			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
					dev_err(adev->dev,
						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
					dev_err(adev->dev,
						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
					dev_err(adev->dev,
						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
				} else {
					dev_err(adev->dev,
						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
				}
			} else {
				dev_err(adev->dev,
					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
			}
		}
	}

	ret = readl(scratch_reg0);

	mutex_unlock(&adev->virt.rlcg_reg_lock);

	return ret;
}

void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip, u32 xcc_id)
{
	u32 rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (!amdgpu_sriov_runtime(adev) &&
		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, value);
	else
		WREG32(offset, value);
}

u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
{
	u32 rlcg_flag;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (!amdgpu_sriov_runtime(adev) &&
		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		return RREG32_NO_KIQ(offset);
	else
		return RREG32(offset);
}
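
/*
 * Usage sketch: these helpers back the driver's register access macros
 * under SR-IOV, so callers normally just pass the access flags and HW IP
 * block and let the RLCG indirect path be chosen automatically (register
 * and values illustrative):
 *
 *	amdgpu_sriov_wreg(adev, reg, val, AMDGPU_REGS_RLC, GC_HWIP, 0);
 *	val = amdgpu_sriov_rreg(adev, reg, AMDGPU_REGS_RLC, GC_HWIP, 0);
 */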