/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_HIVE			8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;

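/*
 * SMN addresses of the PCS error status registers polled for RAS error
 * counting.  Vega20 exposes two XGMI links and two WAFL links; Arcturus
 * exposes six XGMI links and reuses the Vega20 WAFL layout.
 */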
static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI PCS DeskewErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
	{"WAFL PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
	{"WAFL PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
	{"WAFL PCS CRCErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
	{"WAFL PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
	{"WAFL PCS TxMetaDataErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"WAFL PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"WAFL PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
	{"WAFL PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"WAFL PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"WAFL PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"WAFL PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
	{"WAFL PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"WAFL PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"WAFL PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"WAFL PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"WAFL PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"WAFL PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"WAFL PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

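/*
 * Despite the name, this helper does not take hive->hive_lock; it simply
 * returns the hive's device list head for the caller to use as a per-hive
 * handle.
 */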
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
	return &hive->device_list;
}

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory with a mirror
 * set of node sub-directories.
 *
 * The XGMI memory space is built by contiguously adding each node's
 * power-of-two padded VRAM space to the others.
 *
 */


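/*
 * sysfs "show" callback for the hive's xgmi_hive_id attribute; prints the
 * 64-bit hive ID in decimal.
 */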
static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive =
			container_of(attr, struct amdgpu_hive_info, dev_attr);

	return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
}

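/*
 * Create the per-hive "xgmi_hive_info" kobject and its xgmi_hive_id
 * attribute under the first device in the hive.  On OpenBSD this is
 * stubbed out and simply returns -ENOSYS.
 */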
static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	int ret = 0;

	if (WARN_ON(hive->kobj))
		return -EINVAL;

	hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
	if (!hive->kobj) {
		dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
		return -EINVAL;
	}

	hive->dev_attr = (struct device_attribute) {
		.attr = {
			.name = "xgmi_hive_id",
			.mode = S_IRUGO,

		},
		.show = amdgpu_xgmi_show_hive_id,
	};

	ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
		kobject_del(hive->kobj);
		kobject_put(hive->kobj);
		hive->kobj = NULL;
	}

	return ret;
#endif
}

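/* Tear down the hive's xgmi_hive_id attribute and release its kobject. */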
static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
	kobject_del(hive->kobj);
	kobject_put(hive->kobj);
	hive->kobj = NULL;
}

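/*
 * sysfs "show" callback for the per-device xgmi_device_id attribute; prints
 * this GPU's 64-bit xGMI node ID in decimal.
 */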
static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);

}

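/*
 * sysfs "show" callback for the per-device xgmi_error attribute: read the
 * xGMI error counters through the DF FICA interface, report the accumulated
 * count and then reset it.
 */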
#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return snprintf(buf, PAGE_SIZE, "%d\n", error_count);
}


static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);

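/*
 * Publish this device's per-node sysfs entries: the xgmi_device_id and
 * xgmi_error files, a link from the device back to the hive folder, and a
 * node<N> link from the hive folder back to the device.
 */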
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					 struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");


	/* Create sysfs link to the hive info folder, which lives on the first device */
	if (adev != hive->adev) {
		ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	snprintf(node, sizeof(node), "node%d", hive->number_devices);
	/* Create sysfs link from the hive folder to yourself */
	ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;


remove_link:
	sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
	sysfs_remove_link(hive->kobj, adev->ddev->unique);
}



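/*
 * Look up the hive this device belongs to by hive ID, creating a new entry
 * in the static hive table on first sight.  When @lock is set the hive's
 * hive_lock is taken before returning.  Returns NULL if the device has no
 * hive ID or the hive table is full.
 */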
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
	int i;
	struct amdgpu_hive_info *tmp;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	mutex_lock(&xgmi_mutex);

	for (i = 0 ; i < hive_count; ++i) {
		tmp = &xgmi_hives[i];
		if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
			if (lock)
				mutex_lock(&tmp->hive_lock);
			mutex_unlock(&xgmi_mutex);
			return tmp;
		}
	}
	if (i >= AMDGPU_MAX_XGMI_HIVE) {
		mutex_unlock(&xgmi_mutex);
		return NULL;
	}

	/* initialize a new hive if one does not exist yet */
	tmp = &xgmi_hives[hive_count++];

	if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
		mutex_unlock(&xgmi_mutex);
		return NULL;
	}

	tmp->adev = adev;
	tmp->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&tmp->device_list);
	rw_init(&tmp->hive_lock, "aghive");
	rw_init(&tmp->reset_lock, "aghvres");
	task_barrier_init(&tmp->tb);

	if (lock)
		mutex_lock(&tmp->hive_lock);
	tmp->pstate = -1;
	mutex_unlock(&xgmi_mutex);

	return tmp;
}

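/*
 * Request a new xGMI link pstate for this device via DPM.  The hive-wide
 * pstate is only updated once every device in the hive reports the same
 * pstate, or when a high pstate is forced on Vega20.
 */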
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
	struct amdgpu_device *tmp_adev;
	bool update_hive_pstate = true;
	bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;

	if (!hive)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (hive->pstate == pstate) {
		adev->pstate = is_high_pstate ? pstate : adev->pstate;
		goto out;
	}

	dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
	if (ret) {
		dev_err(adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	/* Update device pstate */
	adev->pstate = pstate;

	/*
	 * Update the hive pstate only if all devices of the hive
	 * are in the same pstate
	 */
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		if (tmp_adev->pstate != adev->pstate) {
			update_hive_pstate = false;
			break;
		}
	}
	if (update_hive_pstate || is_high_pstate)
		hive->pstate = pstate;

out:
	mutex_unlock(&hive->hive_lock);

	return ret;
}

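/*
 * Push the locally cached topology (node count and per-node info) to the
 * PSP firmware so every node in the hive shares the same view.
 */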
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret = -EINVAL;

	/* Each psp needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 hive->number_devices,
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}


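/*
 * Return the number of xGMI hops between this device and @peer_adev by
 * searching the cached PSP topology, or -EINVAL if the peer is not found.
 */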
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
		struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0 ; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops;
	return -EINVAL;
}

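/*
 * Register this device with its hive: query the hive and node IDs from the
 * PSP (or synthesize fixed values when no PSP block is present), add the
 * device to the hive's list, refresh the topology on every node and create
 * the per-node sysfs entries.
 */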
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi	*entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}

	/* Set default device pstate */
	adev->pstate = -1;

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	hive->number_devices = count;

	task_barrier_add_task(&hive->tb);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update the node list for the other devices in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit;
		}

		/* get latest topology info for each device from psp */
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
					&tmp_adev->psp.xgmi_context.top_info);
			if (ret) {
				dev_err(tmp_adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					tmp_adev->gmc.xgmi.node_id,
					tmp_adev->gmc.xgmi.hive_id, ret);
				/* To do : continue with some node failed or disable the whole hive */
				goto exit;
			}
		}
	}

	if (!ret)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);


	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret)
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	else
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);

	return ret;
}

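/*
 * Detach this device from its hive: drop the device count, tear down the
 * hive's sysfs entries and locks once the count is exhausted, otherwise
 * remove just this node's barrier task and sysfs links, then terminate the
 * PSP xGMI session.
 */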
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive)
		return -EINVAL;

	if (!(hive->number_devices--)) {
		amdgpu_xgmi_sysfs_destroy(adev, hive);
		mutex_destroy(&hive->hive_lock);
		mutex_destroy(&hive->reset_lock);
	} else {
		task_barrier_rem_task(&hive->tb);
		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
		mutex_unlock(&hive->hive_lock);
	}

	return psp_xgmi_terminate(&adev->psp);
}

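/*
 * Late RAS init for the XGMI/WAFL block: allocate the ras_if descriptor on
 * first use and register the xgmi_wafl_err_count node with the RAS
 * framework.  A no-op on single-node or non-XGMI configurations.
 */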
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_ih_if ih_info = {
		.cb = NULL,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "xgmi_wafl_err_count",
	};

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	if (!adev->gmc.xgmi.ras_if) {
		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gmc.xgmi.ras_if)
			return -ENOMEM;
		adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
		adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gmc.xgmi.ras_if->sub_block_index = 0;
		strlcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl", sizeof(adev->gmc.xgmi.ras_if->name));
	}
	ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
				 &fs_info, &ih_info);
	if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
		kfree(adev->gmc.xgmi.ras_if);
		adev->gmc.xgmi.ras_if = NULL;
	}

	return r;
}

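/* Unregister the XGMI/WAFL RAS block and free its ras_if descriptor. */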
void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
			adev->gmc.xgmi.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
		struct ras_ih_if ih_info = {
			.cb = NULL,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

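/*
 * Translate a device physical address into the hive-relative address space
 * by adding this die's DRAM base, read from the DF while DF C-states are
 * temporarily disallowed.  Falls back to the input address if the DF
 * callbacks are missing or the C-state cannot be disabled.
 */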
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	uint32_t df_inst_id;
	uint64_t dram_base_addr = 0;
	const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;

	if ((!df_funcs)                 ||
	    (!df_funcs->get_df_inst_id) ||
	    (!df_funcs->get_dram_base_addr)) {
		dev_warn(adev->dev,
			 "XGMI: relative phy_addr algorithm is not supported\n");
		return addr;
	}

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
		dev_warn(adev->dev,
			 "failed to disable DF-Cstate, DF register may not be accessible\n");
		return addr;
	}

	df_inst_id = df_funcs->get_df_inst_id(adev);
	dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		dev_warn(adev->dev, "failed to enable DF-Cstate\n");

	return addr + dram_base_addr;
}

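/*
 * Decode one PCS error status register value: walk the field table for the
 * XGMI or WAFL link, log each asserted error and add it to the
 * uncorrectable count.  Correctable errors are not reported by this block.
 */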
static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs)
{
	int i;
	int ue_cnt;

	if (is_xgmi_pcs) {
		/* query xgmi pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  xgmi_pcs_ras_fields[i].pcs_err_mask) >>
				  xgmi_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 xgmi_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	} else {
		/* query wafl pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  wafl_pcs_ras_fields[i].pcs_err_mask) >>
				  wafl_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 wafl_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	}

	return 0;
}

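/*
 * RAS callback: read every XGMI and WAFL PCS error status register for the
 * current ASIC, decode the asserted fields and report the totals through
 * ras_error_status.
 */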
int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
				      void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;
	uint32_t data;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return -EINVAL;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	case CHIP_VEGA20:
	default:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	}

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;

	return 0;
}
770