/*	$NetBSD: amdgpu_ras_eeprom.c,v 1.4 2021/12/19 12:31:45 riastradh Exp $	*/

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ras_eeprom.c,v 1.4 2021/12/19 12:31:45 riastradh Exp $");

#include "amdgpu_ras_eeprom.h"
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include <linux/bits.h>
#include "smu_v11_0_i2c.h"

#include <linux/nbsd-namespace.h>

#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0

/*
 * The two macros below represent the actual size in bytes that
 * these entities occupy in the EEPROM memory.
 * EEPROM_TABLE_RECORD_SIZE differs from sizeof(eeprom_table_record), which
 * uses uint64 to store 6-byte fields such as retired_page.
 */
#define EEPROM_TABLE_HEADER_SIZE 20
#define EEPROM_TABLE_RECORD_SIZE 24

#define EEPROM_ADDRESS_SIZE 0x2

/* Table hdr is 'AMDR' */
#define EEPROM_TABLE_HDR_VAL 0x414d4452
#define EEPROM_TABLE_VER 0x00010000

/* Assume 2 Mbit size */
#define EEPROM_SIZE_BYTES 256000
#define EEPROM_PAGE__SIZE_BYTES 256
#define EEPROM_HDR_START 0
#define EEPROM_RECORD_START (EEPROM_HDR_START + EEPROM_TABLE_HEADER_SIZE)
#define EEPROM_MAX_RECORD_NUM ((EEPROM_SIZE_BYTES - EEPROM_TABLE_HEADER_SIZE) / EEPROM_TABLE_RECORD_SIZE)
#define EEPROM_ADDR_MSB_MASK GENMASK(17, 8)
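
/*
 * For reference (derived from the encode/decode helpers below): the table
 * header is five little-endian 32-bit words (5 * 4 = 20 bytes), so
 * EEPROM_MAX_RECORD_NUM works out to (256000 - 20) / 24 = 10665 record
 * slots, and EEPROM_ADDR_MSB_MASK (GENMASK(17, 8) == 0x3ff00) covers the
 * address bits that select a 256-byte EEPROM page.
 */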

#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev

static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr,
					  unsigned char *buff)
{
	uint32_t *pp = (uint32_t *) buff;

	pp[0] = cpu_to_le32(hdr->header);
	pp[1] = cpu_to_le32(hdr->version);
	pp[2] = cpu_to_le32(hdr->first_rec_offset);
	pp[3] = cpu_to_le32(hdr->tbl_size);
	pp[4] = cpu_to_le32(hdr->checksum);
}

static void __decode_table_header_from_buff(struct amdgpu_ras_eeprom_table_header *hdr,
					    unsigned char *buff)
{
	uint32_t *pp = (uint32_t *)buff;

	hdr->header = le32_to_cpu(pp[0]);
	hdr->version = le32_to_cpu(pp[1]);
	hdr->first_rec_offset = le32_to_cpu(pp[2]);
	hdr->tbl_size = le32_to_cpu(pp[3]);
	hdr->checksum = le32_to_cpu(pp[4]);
}

static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
				 unsigned char *buff)
{
	int ret = 0;
	struct i2c_msg msg = {
		.addr = 0,
		.flags = 0,
		.len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
		.buf = buff,
	};


	*(uint16_t *)buff = EEPROM_HDR_START;
	__encode_table_header_to_buff(&control->tbl_hdr, buff + EEPROM_ADDRESS_SIZE);

	msg.addr = control->i2c_address;

	ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
	if (ret < 1)
		DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);

	return ret;
}



static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
{
	int i;
	uint32_t tbl_sum = 0;

	/* Header checksum, skip checksum field in the calculation */
	for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
		tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);

	return tbl_sum;
}

static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
				     int num)
{
	int i, j;
	uint32_t tbl_sum = 0;

	/* Records checksum */
	for (i = 0; i < num; i++) {
		struct eeprom_table_record *record = &records[i];

		for (j = 0; j < sizeof(*record); j++) {
			tbl_sum += *(((unsigned char *)record) + j);
		}
	}

	return tbl_sum;
}

static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
					   struct eeprom_table_record *records, int num)
{
	return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
}

/* Checksum = 256 - ((sum of all table entries) mod 256) */
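/*
 * Worked example: if the byte sum of the header and all records is
 * 4660 (0x1234), then 4660 % 256 == 52 and the stored checksum is
 * 256 - 52 == 204; validation later checks that 204 + 52 == 256.
 */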
static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
				  struct eeprom_table_record *records, int num,
				  uint32_t old_hdr_byte_sum)
{
	/*
	 * This will update the table sum with new records.
	 *
	 * TODO: What happens when the EEPROM table wraps around and old
	 * records at the start get overwritten?
	 */

	/* need to recalculate updated header byte sum */
	control->tbl_byte_sum -= old_hdr_byte_sum;
	control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);

	control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
}

/* table sum mod 256 + checksum must equal 256 */
static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
				    struct eeprom_table_record *records, int num)
{
	control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);

	if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
		DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
		return false;
	}

	return true;
}

int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
{
	unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
	struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
	int ret = 0;

	mutex_lock(&control->tbl_mutex);

	hdr->header = EEPROM_TABLE_HDR_VAL;
	hdr->version = EEPROM_TABLE_VER;
	hdr->first_rec_offset = EEPROM_RECORD_START;
	hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;

	control->tbl_byte_sum = 0;
	__update_tbl_checksum(control, NULL, 0, 0);
	control->next_addr = EEPROM_RECORD_START;

	ret = __update_table_header(control, buff);

	mutex_unlock(&control->tbl_mutex);

	return ret;

}

int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
{
	int ret = 0;
	struct amdgpu_device *adev = to_amdgpu_device(control);
	unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
	struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
	struct i2c_msg msg = {
		.addr = 0,
		.flags = I2C_M_RD,
		.len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
		.buf = buff,
	};

	mutex_init(&control->tbl_mutex);

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
		ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
		break;

	case CHIP_ARCTURUS:
		control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
		ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
		break;

	default:
		return 0;
	}

	if (ret) {
		DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
		return ret;
	}

	msg.addr = control->i2c_address;

	/* Read/Create table header from EEPROM address 0 */
	ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
	if (ret < 1) {
		DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
		return ret;
	}

	__decode_table_header_from_buff(hdr, &buff[2]);

	if (hdr->header == EEPROM_TABLE_HDR_VAL) {
		control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
				    EEPROM_TABLE_RECORD_SIZE;
		control->tbl_byte_sum = __calc_hdr_byte_sum(control);
		control->next_addr = EEPROM_RECORD_START;

		DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
				 control->num_recs);

	} else {
		DRM_INFO("Creating new EEPROM table");

		ret = amdgpu_ras_eeprom_reset_table(control);
	}

	return ret == 1 ? 0 : -EIO;
}

void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
{
	struct amdgpu_device *adev = to_amdgpu_device(control);

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
		break;
	case CHIP_ARCTURUS:
		smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
		break;

	default:
		return;
	}

	mutex_destroy(&control->tbl_mutex);
}

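/*
 * On-EEPROM record layout written and read by the two helpers below
 * (multi-byte fields little-endian, 24 bytes total):
 *   [0]      err_type
 *   [1]      bank
 *   [2..9]   ts (8 bytes)
 *   [10..15] offset (low 48 bits)
 *   [16]     mem_channel
 *   [17]     mcumc_id
 *   [18..23] retired_page (low 48 bits)
 */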
static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
					  struct eeprom_table_record *record,
					  unsigned char *buff)
{
	__le64 tmp = 0;
	int i = 0;

	/* Next are all record fields according to EEPROM page spec in LE format */
	buff[i++] = record->err_type;

	buff[i++] = record->bank;

	tmp = cpu_to_le64(record->ts);
	memcpy(buff + i, &tmp, 8);
	i += 8;

	tmp = cpu_to_le64((record->offset & 0xffffffffffff));
	memcpy(buff + i, &tmp, 6);
	i += 6;

	buff[i++] = record->mem_channel;
	buff[i++] = record->mcumc_id;

	tmp = cpu_to_le64((record->retired_page & 0xffffffffffff));
	memcpy(buff + i, &tmp, 6);
}

static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *control,
					    struct eeprom_table_record *record,
					    unsigned char *buff)
{
	__le64 tmp = 0;
	int i = 0;

	/* Next are all record fields according to EEPROM page spec in LE format */
	record->err_type = buff[i++];

	record->bank = buff[i++];

	memcpy(&tmp, buff + i, 8);
	record->ts = le64_to_cpu(tmp);
	i += 8;

	memcpy(&tmp, buff + i, 6);
	record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
	i += 6;

	record->mem_channel = buff[i++];
	record->mcumc_id = buff[i++];

	memcpy(&tmp, buff + i, 6);
	record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
}

/*
 * When reaching the end of EEPROM memory, jump back to record address 0.
 * When the next record access would go beyond an EEPROM page boundary,
 * modify bits A17/A8 in the I2C selector to go to the next page.
 */
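/*
 * Worked example (with the sizes defined above): a record at curr_address
 * 0xf0 would spill past offset 0xff; 0xf0 + 24 = 0x108, so the masked page
 * bits change from 0x000 to 0x100 and the record is instead placed at the
 * start of the next page, address 0x100.
 */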
static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
{
	uint32_t next_address = curr_address + EEPROM_TABLE_RECORD_SIZE;

	/* When all EEPROM memory is used, jump back to address 0 */
	if (next_address > EEPROM_SIZE_BYTES) {
		DRM_INFO("Reached end of EEPROM memory, jumping to 0 "
			 "and overriding old record");
		return EEPROM_RECORD_START;
	}

	/*
	 * To check if we overflow a page boundary, compare the next address
	 * with the current one and see if bits 17/8 of the EEPROM address
	 * will change. If they do, start from the next 256-byte page.
	 *
	 * https://www.st.com/resource/en/datasheet/m24m02-dr.pdf sec. 5.1.2
	 */
	if ((curr_address & EEPROM_ADDR_MSB_MASK) != (next_address & EEPROM_ADDR_MSB_MASK)) {
		DRM_DEBUG_DRIVER("Reached end of EEPROM memory page, jumping to next: %lx",
				 (next_address & EEPROM_ADDR_MSB_MASK));

		return (next_address & EEPROM_ADDR_MSB_MASK);
	}

	return curr_address;
}

int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
				     struct eeprom_table_record *records,
				     bool write,
				     int num)
{
	int i, ret = 0;
	struct i2c_msg *msgs, *msg;
	unsigned char *buffs, *buff;
	struct eeprom_table_record *record;
	struct amdgpu_device *adev = to_amdgpu_device(control);

	if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)
		return 0;

	buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
			GFP_KERNEL);
	if (!buffs)
		return -ENOMEM;

	mutex_lock(&control->tbl_mutex);

	msgs = kcalloc(num, sizeof(*msgs), GFP_KERNEL);
	if (!msgs) {
		ret = -ENOMEM;
		goto free_buff;
	}

	/* In case of overflow just start from the beginning so as not to lose the newest records */
	if (write && (control->next_addr + EEPROM_TABLE_RECORD_SIZE * num > EEPROM_SIZE_BYTES))
		control->next_addr = EEPROM_RECORD_START;


	/*
	 * TODO: Currently this issues an EEPROM write per record, which creates
	 * internal fragmentation. Optimize the code to do full page writes of
	 * 256 bytes.
	 */
	for (i = 0; i < num; i++) {
		buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
		record = &records[i];
		msg = &msgs[i];

		control->next_addr = __correct_eeprom_dest_address(control->next_addr);

		/*
		 * Update bits 16,17 of the EEPROM address in the I2C address by
		 * mapping them to bits 1,2 of the device address byte.
		 */
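		/*
		 * e.g. with next_addr == 0x10000, the masked bits give
		 * (0x10000 & 0x3ff00) >> 15 == 0x2, so address bit A16
		 * lands in bit 1 of the device address byte.
		 */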
		msg->addr = control->i2c_address |
				((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15);
		msg->flags = write ? 0 : I2C_M_RD;
		msg->len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE;
		msg->buf = buff;

		/* Insert the EEPROM dest address, bits 0-15 */
		buff[0] = ((control->next_addr >> 8) & 0xff);
		buff[1] = (control->next_addr & 0xff);

		/* EEPROM table content is stored in LE format */
		if (write)
			__encode_table_record_to_buff(control, record, buff + EEPROM_ADDRESS_SIZE);

		/*
		 * The destination EEPROM address might need to be corrected to account
		 * for page or entire memory wrapping
		 */
		control->next_addr += EEPROM_TABLE_RECORD_SIZE;
	}

	ret = i2c_transfer(&control->eeprom_accessor, msgs, num);
	if (ret < 1) {
		DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret);

		/* TODO: Restore the previous next EEPROM address? */
		goto free_msgs;
	}


	if (!write) {
		for (i = 0; i < num; i++) {
			buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
			record = &records[i];

			__decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
		}
	}

	if (write) {
		uint32_t old_hdr_byte_sum = __calc_hdr_byte_sum(control);

		/*
		 * Update the table header with the new size and checksum, and
		 * account for table wrap-around, where the assumption is that
		 * the wrapped table is treated as an empty table.
		 *
		 * TODO - Check the assumption is correct
		 */
		control->num_recs += num;
		control->num_recs %= EEPROM_MAX_RECORD_NUM;
		control->tbl_hdr.tbl_size += EEPROM_TABLE_RECORD_SIZE * num;
		if (control->tbl_hdr.tbl_size > EEPROM_SIZE_BYTES)
			control->tbl_hdr.tbl_size = EEPROM_TABLE_HEADER_SIZE +
				control->num_recs * EEPROM_TABLE_RECORD_SIZE;

		__update_tbl_checksum(control, records, num, old_hdr_byte_sum);

		__update_table_header(control, buffs);
	} else if (!__validate_tbl_checksum(control, records, num)) {
		DRM_WARN("EEPROM Table checksum mismatch!");
		/* TODO: Uncomment when EEPROM read/write is reliable */
		/* ret = -EIO; */
	}

free_msgs:
	kfree(msgs);

free_buff:
	kfree(buffs);

	mutex_unlock(&control->tbl_mutex);

	return ret == num ? 0 : -EIO;
}

/* Used for testing if bugs are encountered */
#if 0
void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control)
{
	int i;
	struct eeprom_table_record *recs = kcalloc(1, sizeof(*recs), GFP_KERNEL);

	if (!recs)
		return;

	for (i = 0; i < 1; i++) {
		recs[i].address = 0xdeadbeef;
		recs[i].retired_page = i;
	}

	if (!amdgpu_ras_eeprom_process_recods(control, recs, true, 1)) {

		memset(recs, 0, sizeof(*recs) * 1);

		control->next_addr = EEPROM_RECORD_START;

		if (!amdgpu_ras_eeprom_process_recods(control, recs, false, 1)) {
			for (i = 0; i < 1; i++)
				DRM_INFO("rec.address :0x%llx, rec.retired_page :%llu",
					 recs[i].address, recs[i].retired_page);
		} else
			DRM_ERROR("Failed in reading from table");

	} else
		DRM_ERROR("Failed in writing to table");
}
#endif