xref: /netbsd-src/sys/dev/ic/spdmem.c (revision d87b0909477dab16f0c03fda67643a13790682cb)
1 /* $NetBSD: spdmem.c,v 1.38 2022/02/02 22:43:14 nakayama Exp $ */
2 
3 /*
4  * Copyright (c) 2007 Nicolas Joly
5  * Copyright (c) 2007 Paul Goyette
6  * Copyright (c) 2007 Tobias Nygren
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Serial Presence Detect (SPD) memory identification
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: spdmem.c,v 1.38 2022/02/02 22:43:14 nakayama Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/device.h>
42 #include <sys/endian.h>
43 #include <sys/sysctl.h>
44 #include <machine/bswap.h>
45 
46 #include <dev/i2c/i2cvar.h>
47 #include <dev/ic/spdmemreg.h>
48 #include <dev/ic/spdmemvar.h>
49 
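/*
 * This file implements only the bus-independent probe/attach/decode
 * logic.  A bus front-end (for example the i2c attachment in
 * dev/i2c/spdmem_i2c.c) is expected to fill in sc->sc_read with a
 * routine that fetches a single SPD byte at a given offset and then
 * call spdmem_common_probe() and spdmem_common_attach().
 */
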
50 /* Routines for decoding spd data */
51 static void decode_edofpm(const struct sysctlnode *, device_t, struct spdmem *);
52 static void decode_rom(const struct sysctlnode *, device_t, struct spdmem *);
53 static void decode_sdram(const struct sysctlnode *, device_t, struct spdmem *,
54 	int);
55 static void decode_ddr(const struct sysctlnode *, device_t, struct spdmem *);
56 static void decode_ddr2(const struct sysctlnode *, device_t, struct spdmem *);
57 static void decode_ddr3(const struct sysctlnode *, device_t, struct spdmem *);
58 static void decode_ddr4(const struct sysctlnode *, device_t, struct spdmem *);
59 static void decode_fbdimm(const struct sysctlnode *, device_t, struct spdmem *);
60 
61 static void decode_size_speed(device_t, const struct sysctlnode *,
62 			      int, int, int, int, bool, const char *, int);
63 static void decode_voltage_refresh(device_t, struct spdmem *);
64 
65 #define IS_RAMBUS_TYPE (s->sm_len < 4)
66 
67 static const char* const spdmem_basic_types[] = {
68 	"unknown",
69 	"FPM",
70 	"EDO",
71 	"Pipelined Nibble",
72 	"SDRAM",
73 	"ROM",
74 	"DDR SGRAM",
75 	"DDR SDRAM",
76 	"DDR2 SDRAM",
77 	"DDR2 SDRAM FB",
78 	"DDR2 SDRAM FB Probe",
79 	"DDR3 SDRAM",
80 	"DDR4 SDRAM",
81 	"unknown",
82 	"DDR4E SDRAM",
83 	"LPDDR3 SDRAM",
84 	"LPDDR4 SDRAM",
85 	"LPDDR4X SDRAM",
86 	"DDR5 SDRAM"
87 };
88 
89 static const char* const spdmem_ddr4_module_types[] = {
90 	"DDR4 Extended",
91 	"DDR4 RDIMM",
92 	"DDR4 UDIMM",
93 	"DDR4 SO-DIMM",
94 	"DDR4 Load-Reduced DIMM",
95 	"DDR4 Mini-RDIMM",
96 	"DDR4 Mini-UDIMM",
97 	"DDR4 Reserved",
98 	"DDR4 72Bit SO-RDIMM",
99 	"DDR4 72Bit SO-UDIMM",
100 	"DDR4 Undefined",
101 	"DDR4 Reserved",
102 	"DDR4 16Bit SO-DIMM",
103 	"DDR4 32Bit SO-DIMM",
104 	"DDR4 Reserved",
105 	"DDR4 Undefined"
106 };
107 
108 static const char* const spdmem_superset_types[] = {
109 	"unknown",
110 	"ESDRAM",
111 	"DDR ESDRAM",
112 	"PEM EDO",
113 	"PEM SDRAM"
114 };
115 
116 static const char* const spdmem_voltage_types[] = {
117 	"TTL (5V tolerant)",
118 	"LvTTL (not 5V tolerant)",
119 	"HSTL 1.5V",
120 	"SSTL 3.3V",
121 	"SSTL 2.5V",
122 	"SSTL 1.8V"
123 };
124 
125 static const char* const spdmem_refresh_types[] = {
126 	"15.625us",
127 	"3.9us",
128 	"7.8us",
129 	"31.3us",
130 	"62.5us",
131 	"125us"
132 };
133 
134 static const char* const spdmem_parity_types[] = {
135 	"no parity or ECC",
136 	"data parity",
137 	"data ECC",
138 	"data parity and ECC",
139 	"cmd/addr parity",
140 	"cmd/addr/data parity",
141 	"cmd/addr parity, data ECC",
142 	"cmd/addr/data parity, data ECC"
143 };
144 
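/*
 * SPD EEPROM sizes used for DDR4 (and, tentatively, DDR5), indexed by
 * the "SPD bytes used" value in the low nibble of byte 0; index 0 means
 * undefined/unprogrammed.
 */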
145 int spd_rom_sizes[] = { 0, 128, 256, 384, 512 };
146 
147 
148 /* Cycle time fractional values (units of .001 ns) for DDR and DDR2 SDRAM */
149 static const uint16_t spdmem_cycle_frac[] = {
150 	0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
151 	250, 333, 667, 750, 999, 999
152 };
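
/*
 * For instance, a cycle-time whole value of 3 with fractional index 0xd
 * (a DDR2-only encoding) decodes to 3 * 1000 + 750 = 3750, i.e. the
 * 3.75ns clock of DDR2-533.
 */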
153 
154 /* Format string for timing info */
155 #define	LATENCY	"tAA-tRCD-tRP-tRAS: %d-%d-%d-%d\n"
156 
157 /* CRC functions used for certain memory types */
158 
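/*
 * spdcrc16() computes the CRC defined by the JEDEC SPD annexes for
 * DDR3/DDR4: CRC-16 with polynomial 0x1021 (x^16 + x^12 + x^5 + 1),
 * initial value 0, MSB first, no final XOR, over SPD bytes 0..count
 * inclusive.  This is the algorithm commonly known as CRC-16/XMODEM
 * (its check value for the ASCII string "123456789" is 0x31c3).
 */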
159 static uint16_t
160 spdcrc16(struct spdmem_softc *sc, int count)
161 {
162 	uint16_t crc;
163 	int i, j;
164 	uint8_t val;
165 	crc = 0;
166 	for (j = 0; j <= count; j++) {
167 		(sc->sc_read)(sc, j, &val);
168 		crc = crc ^ val << 8;
169 		for (i = 0; i < 8; ++i)
170 			if (crc & 0x8000)
171 				crc = crc << 1 ^ 0x1021;
172 			else
173 				crc = crc << 1;
174 	}
175 	return (crc & 0xFFFF);
176 }
177 
178 int
179 spdmem_common_probe(struct spdmem_softc *sc)
180 {
181 	int cksum = 0;
182 	uint8_t i, val, spd_type;
183 	int spd_len, spd_crc_cover;
184 	uint16_t crc_calc, crc_spd;
185 
186 	/* Read failed means a device doesn't exist */
187 	if ((sc->sc_read)(sc, 2, &spd_type) != 0)
188 		return 0;
189 
190 	/* Memory type should not be 0 */
191 	if (spd_type == 0x00)
192 		return 0;
193 
194 	/* For older memory types, validate the checksum over 1st 63 bytes */
195 	if (spd_type <= SPDMEM_MEMTYPE_DDR2SDRAM) {
196 		for (i = 0; i < 63; i++) {
197 			(sc->sc_read)(sc, i, &val);
198 			cksum += val;
199 		}
200 
201 		(sc->sc_read)(sc, 63, &val);
202 
203 		if ((cksum & 0xff) != val) {
204 			aprint_debug("spd checksum failed, calc = 0x%02x, "
205 				     "spd = 0x%02x\n", cksum, val);
206 			return 0;
207 		} else
208 			return 1;
209 	}
210 
211 	/* For DDR3 and FBDIMM, verify the CRC */
212 	else if (spd_type <= SPDMEM_MEMTYPE_DDR3SDRAM) {
213 		(sc->sc_read)(sc, 0, &val);
214 		spd_len = val;
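		/*
		 * SPDMEM_SPDCRC_116 in byte 0 indicates that the CRC covers
		 * only bytes 0-116 rather than 0-125 (JEDEC DDR3 SPD).
		 */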
215 		if (spd_len & SPDMEM_SPDCRC_116)
216 			spd_crc_cover = 116;
217 		else
218 			spd_crc_cover = 125;
219 		switch (spd_len & SPDMEM_SPDLEN_MASK) {
220 		case SPDMEM_SPDLEN_128:
221 			spd_len = 128;
222 			break;
223 		case SPDMEM_SPDLEN_176:
224 			spd_len = 176;
225 			break;
226 		case SPDMEM_SPDLEN_256:
227 			spd_len = 256;
228 			break;
229 		default:
230 			return 0;
231 		}
232 		if (spd_crc_cover > spd_len)
233 			return 0;
234 		crc_calc = spdcrc16(sc, spd_crc_cover);
235 		(sc->sc_read)(sc, 127, &val);
236 		crc_spd = val << 8;
237 		(sc->sc_read)(sc, 126, &val);
238 		crc_spd |= val;
239 		if (crc_calc != crc_spd) {
240 			aprint_debug("crc16 failed, covers %d bytes, "
241 				     "calc = 0x%04x, spd = 0x%04x\n",
242 				     spd_crc_cover, crc_calc, crc_spd);
243 			return 0;
244 		}
245 		return 1;
246 	} else if (spd_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
247 		(sc->sc_read)(sc, 0, &val);
248 		spd_len = val & 0x0f;
249 		if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
250 			return 0;
251 		spd_len = spd_rom_sizes[spd_len];
252 		spd_crc_cover = 125; /* For byte 0 to 125 */
253 		if (spd_crc_cover > spd_len)
254 			return 0;
255 		crc_calc = spdcrc16(sc, spd_crc_cover);
256 		(sc->sc_read)(sc, 127, &val);
257 		crc_spd = val << 8;
258 		(sc->sc_read)(sc, 126, &val);
259 		crc_spd |= val;
260 		if (crc_calc != crc_spd) {
261 			aprint_debug("crc16 failed, covers %d bytes, "
262 				     "calc = 0x%04x, spd = 0x%04x\n",
263 				     spd_crc_cover, crc_calc, crc_spd);
264 			return 0;
265 		}
266 		/*
267 		 * We probably could also verify the CRC for the other
268 		 * "pages" of SPD data in blocks 1 and 2, but we'll do
269 		 * it some other time.
270 		 */
271 		return 1;
272 	} else if (spd_type == SPDMEM_MEMTYPE_DDR5SDRAM) {
273 		/* XXX Need Datasheet. */
274 		(sc->sc_read)(sc, 0, &val);
275 		spd_len = val & 0x0f;
276 		if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
277 			return 0;
278 		aprint_verbose("DDR5 SPD ROM?\n");
279 		return 0;
280 	}
281 
282 	/* For unrecognized memory types, don't match at all */
283 	return 0;
284 }
285 
286 void
287 spdmem_common_attach(struct spdmem_softc *sc, device_t self)
288 {
289 	struct spdmem *s = &(sc->sc_spd_data);
290 	const char *type;
291 	const char *rambus_rev = "Reserved";
292 	int dimm_size;
293 	unsigned int i, spd_len, spd_size;
294 	const struct sysctlnode *node = NULL;
295 
296 	(sc->sc_read)(sc, 0, &s->sm_len);
297 	(sc->sc_read)(sc, 1, &s->sm_size);
298 	(sc->sc_read)(sc, 2, &s->sm_type);
299 
300 	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
301 		/*
302 		 * An even newer encoding with one byte holding both
303 		 * the used-size and capacity values
304 		 */
305 		spd_len = s->sm_len & 0x0f;
306 		spd_size = (s->sm_len >> 4) & 0x07;
307 
308 		spd_len = spd_rom_sizes[spd_len];
309 		spd_size *= 512;
310 
311 	} else if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
312 		/*
313 		 * FBDIMM and DDR3 (and probably all newer) have a different
314 		 * encoding of the SPD EEPROM used/total sizes
315 		 */
316 		spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
317 		switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
318 		case SPDMEM_SPDLEN_128:
319 			spd_len = 128;
320 			break;
321 		case SPDMEM_SPDLEN_176:
322 			spd_len = 176;
323 			break;
324 		case SPDMEM_SPDLEN_256:
325 			spd_len = 256;
326 			break;
327 		default:
328 			spd_len = 64;
329 			break;
330 		}
331 	} else {
332 		spd_size = 1 << s->sm_size;
333 		spd_len = s->sm_len;
334 		if (spd_len < 64)
335 			spd_len = 64;
336 	}
337 	if (spd_len > spd_size)
338 		spd_len = spd_size;
339 	if (spd_len > sizeof(struct spdmem))
340 		spd_len = sizeof(struct spdmem);
341 	for (i = 3; i < spd_len; i++)
342 		(sc->sc_read)(sc, i, &((uint8_t *)s)[i]);
343 
344 	/*
345 	 * Setup our sysctl subtree, hw.spdmemN
346 	 */
347 	sc->sc_sysctl_log = NULL;
348 	sysctl_createv(&sc->sc_sysctl_log, 0, NULL, &node,
349 	    0, CTLTYPE_NODE,
350 	    device_xname(self), NULL, NULL, 0, NULL, 0,
351 	    CTL_HW, CTL_CREATE, CTL_EOL);
352 	if (node != NULL && spd_len != 0)
353 		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
354 		    0,
355 		    CTLTYPE_STRUCT, "spd_data",
356 		    SYSCTL_DESCR("raw spd data"), NULL,
357 		    0, s, spd_len,
358 		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
359 
360 	/*
361 	 * Decode and print key SPD contents
362 	 */
363 	if (IS_RAMBUS_TYPE) {
364 		if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
365 			type = "Rambus";
366 		else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
367 			type = "Direct Rambus";
368 		else
369 			type = "Rambus (unknown)";
370 
371 		switch (s->sm_len) {
372 		case 0:
373 			rambus_rev = "Invalid";
374 			break;
375 		case 1:
376 			rambus_rev = "0.7";
377 			break;
378 		case 2:
379 			rambus_rev = "1.0";
380 			break;
381 		default:
382 			rambus_rev = "Reserved";
383 			break;
384 		}
385 	} else {
386 		if (s->sm_type < __arraycount(spdmem_basic_types))
387 			type = spdmem_basic_types[s->sm_type];
388 		else
389 			type = "unknown memory type";
390 
391 		if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
392 		    s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
393 			type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
394 		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
395 		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
396 			type = spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
397 		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
398 		    s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
399 			type =
400 			    spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
401 		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
402 		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
403 			type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
404 		}
405 		if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM &&
406 		    s->sm_ddr4.ddr4_mod_type <
407 				__arraycount(spdmem_ddr4_module_types)) {
408 			type = spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type];
409 		}
410 	}
411 
412 	strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);
413 
414 	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
415 		/*
416 		 * The latest spec (DDR4 SPD Document Release 3) defines
417 		 * NVDIMM Hybrid only.
418 		 */
419 		if ((s->sm_ddr4.ddr4_hybrid)
420 		    && (s->sm_ddr4.ddr4_hybrid_media == 1))
421 			strlcat(sc->sc_type, " NVDIMM hybrid",
422 			    SPDMEM_TYPE_MAXLEN);
423 	}
424 
425 	if (node != NULL)
426 		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
427 		    0,
428 		    CTLTYPE_STRING, "mem_type",
429 		    SYSCTL_DESCR("memory module type"), NULL,
430 		    0, sc->sc_type, 0,
431 		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
432 
433 	if (IS_RAMBUS_TYPE) {
434 		aprint_naive("\n");
435 		aprint_normal("\n");
436 		aprint_normal_dev(self, "%s, SPD Revision %s", type, rambus_rev);
437 		dimm_size = 1 << (s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13);
438 		if (dimm_size >= 1024)
439 			aprint_normal(", %dGB\n", dimm_size / 1024);
440 		else
441 			aprint_normal(", %dMB\n", dimm_size);
442 
443 		/* No further decode for RAMBUS memory */
444 		return;
445 	}
446 	switch (s->sm_type) {
447 	case SPDMEM_MEMTYPE_EDO:
448 	case SPDMEM_MEMTYPE_FPM:
449 		decode_edofpm(node, self, s);
450 		break;
451 	case SPDMEM_MEMTYPE_ROM:
452 		decode_rom(node, self, s);
453 		break;
454 	case SPDMEM_MEMTYPE_SDRAM:
455 		decode_sdram(node, self, s, spd_len);
456 		break;
457 	case SPDMEM_MEMTYPE_DDRSDRAM:
458 		decode_ddr(node, self, s);
459 		break;
460 	case SPDMEM_MEMTYPE_DDR2SDRAM:
461 		decode_ddr2(node, self, s);
462 		break;
463 	case SPDMEM_MEMTYPE_DDR3SDRAM:
464 		decode_ddr3(node, self, s);
465 		break;
466 	case SPDMEM_MEMTYPE_FBDIMM:
467 	case SPDMEM_MEMTYPE_FBDIMM_PROBE:
468 		decode_fbdimm(node, self, s);
469 		break;
470 	case SPDMEM_MEMTYPE_DDR4SDRAM:
471 		decode_ddr4(node, self, s);
472 		break;
473 	}
474 
475 	/* Dump SPD */
476 	for (i = 0; i < spd_len;  i += 16) {
477 		unsigned int j, k;
478 		aprint_debug_dev(self, "0x%02x:", i);
479 		k = (spd_len > (i + 16)) ? i + 16 : spd_len;
480 		for (j = i; j < k; j++)
481 			aprint_debug(" %02x", ((uint8_t *)s)[j]);
482 		aprint_debug("\n");
483 	}
484 }
485 
486 int
487 spdmem_common_detach(struct spdmem_softc *sc, device_t self)
488 {
489 	sysctl_teardown(&sc->sc_sysctl_log);
490 
491 	return 0;
492 }
493 
494 static void
495 decode_size_speed(device_t self, const struct sysctlnode *node,
496 		  int dimm_size, int cycle_time, int d_clk, int bits,
497 		  bool round, const char *ddr_type_string, int speed)
498 {
499 	int p_clk;
500 	struct spdmem_softc *sc = device_private(self);
501 
502 	if (dimm_size < 1024)
503 		aprint_normal("%dMB", dimm_size);
504 	else
505 		aprint_normal("%dGB", dimm_size / 1024);
506 	if (node != NULL)
507 		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
508 		    CTLFLAG_IMMEDIATE,
509 		    CTLTYPE_INT, "size",
510 		    SYSCTL_DESCR("module size in MB"), NULL,
511 		    dimm_size, NULL, 0,
512 		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
513 
514 	if (cycle_time == 0) {
515 		aprint_normal("\n");
516 		return;
517 	}
518 
519 	/*
520 	 * Calculate p_clk first, since for DDR3 we need maximum significance.
521 	 * DDR3 rating is not rounded to a multiple of 100.  This results in
522 	 * cycle_time of 1.5ns displayed as PC3-10666.
523 	 *
524 	 * For SDRAM, the speed is provided by the caller so we use it.
525 	 */
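	/*
	 * Worked example (illustrative): DDR3 with cycle_time = 1500 (1.5ns),
	 * d_clk = 2 and bits = 64 gives
	 *	p_clk = (2000000 * 64) / 8 / 1500 = 10666
	 *	d_clk = (2000000 + 750) / 1500   = 1333
	 * and is printed as ", 1333MHz (PC3-10666)".
	 */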
526 	d_clk *= 1000 * 1000;
527 	if (speed)
528 		p_clk = speed;
529 	else
530 		p_clk = (d_clk * bits) / 8 / cycle_time;
531 	d_clk = ((d_clk + cycle_time / 2) ) / cycle_time;
532 	if (round) {
533 		if ((p_clk % 100) >= 50)
534 			p_clk += 50;
535 		p_clk -= p_clk % 100;
536 	}
537 	aprint_normal(", %dMHz (%s-%d)\n",
538 		      d_clk, ddr_type_string, p_clk);
539 	if (node != NULL)
540 		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
541 			       CTLFLAG_IMMEDIATE,
542 			       CTLTYPE_INT, "speed",
543 			       SYSCTL_DESCR("memory speed in MHz"),
544 			       NULL, d_clk, NULL, 0,
545 			       CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
546 }
547 
548 static void
549 decode_voltage_refresh(device_t self, struct spdmem *s)
550 {
551 	const char *voltage, *refresh;
552 
553 	if (s->sm_voltage < __arraycount(spdmem_voltage_types))
554 		voltage = spdmem_voltage_types[s->sm_voltage];
555 	else
556 		voltage = "unknown";
557 
558 	if (s->sm_refresh < __arraycount(spdmem_refresh_types))
559 		refresh = spdmem_refresh_types[s->sm_refresh];
560 	else
561 		refresh = "unknown";
562 
563 	aprint_verbose_dev(self, "voltage %s, refresh time %s%s\n",
564 			voltage, refresh,
565 			s->sm_selfrefresh?" (self-refreshing)":"");
566 }
567 
568 static void
569 decode_edofpm(const struct sysctlnode *node, device_t self, struct spdmem *s)
570 {
571 
572 	aprint_naive("\n");
573 	aprint_normal("\n");
574 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
575 
576 	aprint_normal("\n");
577 	aprint_verbose_dev(self,
578 	    "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
579 	    s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols, s->sm_fpm.fpm_banks,
580 	    s->sm_fpm.fpm_tRAC, s->sm_fpm.fpm_tCAC);
581 }
582 
583 static void
584 decode_rom(const struct sysctlnode *node, device_t self, struct spdmem *s)
585 {
586 
587 	aprint_naive("\n");
588 	aprint_normal("\n");
589 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
590 
591 	aprint_normal("\n");
592 	aprint_verbose_dev(self, "%d rows, %d cols, %d banks\n",
593 	    s->sm_rom.rom_rows, s->sm_rom.rom_cols, s->sm_rom.rom_banks);
594 }
595 
596 static void
597 decode_sdram(const struct sysctlnode *node, device_t self, struct spdmem *s,
598 	     int spd_len)
599 {
600 	int dimm_size, cycle_time, bits, tAA, i, speed, freq;
601 
602 	aprint_naive("\n");
603 	aprint_normal("\n");
604 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
605 
606 	aprint_normal("%s, %s, ",
607 		(s->sm_sdr.sdr_mod_attrs & SPDMEM_SDR_MASK_REG)?
608 			" (registered)":"",
609 		(s->sm_config < __arraycount(spdmem_parity_types))?
610 			spdmem_parity_types[s->sm_config]:"invalid parity");
611 
612 	dimm_size = 1 << (s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17);
613 	dimm_size *= s->sm_sdr.sdr_banks * s->sm_sdr.sdr_banks_per_chip;
614 
615 	cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
616 		     s->sm_sdr.sdr_cycle_tenths * 100;
617 	bits = le16toh(s->sm_sdr.sdr_datawidth);
618 	if (s->sm_config == 1 || s->sm_config == 2)
619 		bits -= 8;
620 
621 	/* Calculate speed here - from OpenBSD */
622 	if (spd_len >= 128)
623 		freq = ((uint8_t *)s)[126];
624 	else
625 		freq = 0;
626 	switch (freq) {
627 		/*
628 		 * Must check cycle time since some PC-133 DIMMs
629 		 * actually report PC-100
630 		 */
631 	    case 100:
632 	    case 133:
633 		if (cycle_time < 8000)
634 			speed = 133;
635 		else
636 			speed = 100;
637 		break;
638 	    case 0x66:		/* Legacy DIMMs use _hex_ 66! */
639 	    default:
640 		speed = 66;
641 	}
642 	decode_size_speed(self, node, dimm_size, cycle_time, 1, bits, FALSE,
643 			  "PC", speed);
644 
645 	aprint_verbose_dev(self,
646 	    "%d rows, %d cols, %d banks, %d banks/chip, %d.%dns cycle time\n",
647 	    s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols, s->sm_sdr.sdr_banks,
648 	    s->sm_sdr.sdr_banks_per_chip, cycle_time/1000,
649 	    (cycle_time % 1000) / 100);
650 
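	/*
	 * Byte 18 is a bitmask of supported CAS latencies for SDR SDRAM
	 * (bit n set means CL n+1 is supported); report the highest one.
	 */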
651 	tAA  = 0;
652 	for (i = 0; i < 8; i++)
653 		if (s->sm_sdr.sdr_tCAS & (1 << i))
654 			tAA = i;
655 	tAA++;
656 	aprint_verbose_dev(self, LATENCY, tAA, s->sm_sdr.sdr_tRCD,
657 	    s->sm_sdr.sdr_tRP, s->sm_sdr.sdr_tRAS);
658 
659 	decode_voltage_refresh(self, s);
660 }
661 
662 static void
663 decode_ddr(const struct sysctlnode *node, device_t self, struct spdmem *s)
664 {
665 	int dimm_size, cycle_time, bits, tAA, i;
666 
667 	aprint_naive("\n");
668 	aprint_normal("\n");
669 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
670 
671 	aprint_normal("%s, %s, ",
672 		(s->sm_ddr.ddr_mod_attrs & SPDMEM_DDR_MASK_REG)?
673 			" (registered)":"",
674 		(s->sm_config < __arraycount(spdmem_parity_types))?
675 			spdmem_parity_types[s->sm_config]:"invalid parity");
676 
677 	dimm_size = 1 << (s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17);
678 	dimm_size *= s->sm_ddr.ddr_ranks * s->sm_ddr.ddr_banks_per_chip;
679 
680 	cycle_time = s->sm_ddr.ddr_cycle_whole * 1000 +
681 		  spdmem_cycle_frac[s->sm_ddr.ddr_cycle_tenths];
682 	bits = le16toh(s->sm_ddr.ddr_datawidth);
683 	if (s->sm_config == 1 || s->sm_config == 2)
684 		bits -= 8;
685 	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
686 			  "PC", 0);
687 
688 	aprint_verbose_dev(self,
689 	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%dns cycle time\n",
690 	    s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols, s->sm_ddr.ddr_ranks,
691 	    s->sm_ddr.ddr_banks_per_chip, cycle_time/1000,
692 	    (cycle_time % 1000 + 50) / 100);
693 
694 	tAA  = 0;
695 	for (i = 2; i < 8; i++)
696 		if (s->sm_ddr.ddr_tCAS & (1 << i))
697 			tAA = i;
698 	tAA /= 2;
699 
700 #define __DDR_ROUND(scale, field)	\
701 		((scale * s->sm_ddr.field + cycle_time - 1) / cycle_time)
702 
703 	aprint_verbose_dev(self, LATENCY, tAA, __DDR_ROUND(250, ddr_tRCD),
704 		__DDR_ROUND(250, ddr_tRP), __DDR_ROUND(1000, ddr_tRAS));
705 
706 #undef	__DDR_ROUND
707 
708 	decode_voltage_refresh(self, s);
709 }
710 
711 static void
712 decode_ddr2(const struct sysctlnode *node, device_t self, struct spdmem *s)
713 {
714 	int dimm_size, cycle_time, bits, tAA, i;
715 
716 	aprint_naive("\n");
717 	aprint_normal("\n");
718 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
719 
720 	aprint_normal("%s, %s, ",
721 		(s->sm_ddr2.ddr2_mod_attrs & SPDMEM_DDR2_MASK_REG)?
722 			" (registered)":"",
723 		(s->sm_config < __arraycount(spdmem_parity_types))?
724 			spdmem_parity_types[s->sm_config]:"invalid parity");
725 
726 	dimm_size = 1 << (s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17);
727 	dimm_size *= (s->sm_ddr2.ddr2_ranks + 1) *
728 		     s->sm_ddr2.ddr2_banks_per_chip;
729 
730 	cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 +
731 		 spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac];
732 	bits = s->sm_ddr2.ddr2_datawidth;
733 	if ((s->sm_config & 0x03) != 0)
734 		bits -= 8;
735 	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
736 			  "PC2", 0);
737 
738 	aprint_verbose_dev(self,
739 	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%02dns cycle time\n",
740 	    s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols,
741 	    s->sm_ddr2.ddr2_ranks + 1, s->sm_ddr2.ddr2_banks_per_chip,
742 	    cycle_time / 1000, (cycle_time % 1000 + 5) /10 );
743 
744 	tAA  = 0;
745 	for (i = 2; i < 8; i++)
746 		if (s->sm_ddr2.ddr2_tCAS & (1 << i))
747 			tAA = i;
748 
749 #define __DDR2_ROUND(scale, field)	\
750 		((scale * s->sm_ddr2.field + cycle_time - 1) / cycle_time)
751 
752 	aprint_verbose_dev(self, LATENCY, tAA, __DDR2_ROUND(250, ddr2_tRCD),
753 		__DDR2_ROUND(250, ddr2_tRP), __DDR2_ROUND(1000, ddr2_tRAS));
754 
755 #undef	__DDR2_ROUND
756 
757 	decode_voltage_refresh(self, s);
758 }
759 
760 static void
761 print_part(const char *part, size_t pnsize)
762 {
763 	const char *p = memchr(part, ' ', pnsize);
764 	if (p == NULL)
765 		p = part + pnsize;
766 	aprint_normal(": %.*s\n", (int)(p - part), part);
767 }
768 
769 static u_int
770 ddr3_value_pico(struct spdmem *s, uint8_t txx_mtb, uint8_t txx_ftb)
771 {
772 	u_int mtb, ftb; /* in picoseconds */
773 	intmax_t signed_txx_ftb;
774 	u_int val;
775 
776 	mtb = (u_int)s->sm_ddr3.ddr3_mtb_dividend * 1000 /
777 	    s->sm_ddr3.ddr3_mtb_divisor;
778 	ftb = (u_int)s->sm_ddr3.ddr3_ftb_dividend * 1000 /
779 	    s->sm_ddr3.ddr3_ftb_divisor;
780 
781 	/* tXX_ftb is signed value */
782 	signed_txx_ftb = (int8_t)txx_ftb;
783 	val = txx_mtb * mtb +
784 	    ((txx_ftb > 127) ? signed_txx_ftb : txx_ftb) * ftb / 1000;
785 
786 	return val;
787 }
788 
789 #define __DDR3_VALUE_PICO(s, field)				\
790 	ddr3_value_pico(s, s->sm_ddr3.ddr3_##field##_mtb,	\
791 	    s->sm_ddr3.ddr3_##field##_ftb)
792 
793 static void
794 decode_ddr3(const struct sysctlnode *node, device_t self, struct spdmem *s)
795 {
796 	int dimm_size, cycle_time, bits;
797 
798 	aprint_naive("\n");
799 	print_part(s->sm_ddr3.ddr3_part, sizeof(s->sm_ddr3.ddr3_part));
800 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
801 
802 	if (s->sm_ddr3.ddr3_mod_type ==
803 		SPDMEM_DDR3_TYPE_MINI_RDIMM ||
804 	    s->sm_ddr3.ddr3_mod_type == SPDMEM_DDR3_TYPE_RDIMM)
805 		aprint_normal(" (registered)");
806 	aprint_normal(", %sECC, %stemp-sensor, ",
807 		(s->sm_ddr3.ddr3_hasECC)?"":"no ",
808 		(s->sm_ddr3.ddr3_has_therm_sensor)?"":"no ");
809 
810 	/*
811 	 * DDR3 size specification is quite different from others
812 	 *
813 	 * Module capacity is defined as
814 	 *	Chip_Capacity_in_bits / 8bits-per-byte *
815 	 *	external_bus_width / internal_bus_width
816 	 * We further divide by 2**20 to get our answer in MB
817 	 */
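	/*
	 * Illustrative example: 4Gbit chips (chipsize = 4) on a 64-bit bus
	 * (datawidth = 3), x8 devices (chipwidth = 1) and physbanks = 0
	 * (one rank) give (4 + 28 - 20) - 3 + (3 + 3) - (1 + 2) = 12,
	 * i.e. (1 << 12) * 1 = 4096MB.
	 */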
818 	dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
819 		    (s->sm_ddr3.ddr3_datawidth + 3) -
820 		    (s->sm_ddr3.ddr3_chipwidth + 2);
821 	dimm_size = (1 << dimm_size) * (s->sm_ddr3.ddr3_physbanks + 1);
822 
823 	cycle_time = __DDR3_VALUE_PICO(s, tCKmin);
824 	bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
825 	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, FALSE,
826 			  "PC3", 0);
827 
828 	aprint_verbose_dev(self,
829 	    "%d rows, %d cols, %d log. banks, %d phys. banks, "
830 	    "%d.%03dns cycle time\n",
831 	    s->sm_ddr3.ddr3_rows + 12, s->sm_ddr3.ddr3_cols + 9,
832 	    1 << (s->sm_ddr3.ddr3_logbanks + 3),
833 	    s->sm_ddr3.ddr3_physbanks + 1,
834 	    cycle_time/1000, cycle_time % 1000);
835 
836 #define	__DDR3_CYCLES(val)						\
837 	((val / cycle_time) + ((val % cycle_time) ? 1 : 0))
838 
839 	aprint_verbose_dev(self, LATENCY,
840 	    __DDR3_CYCLES(__DDR3_VALUE_PICO(s, tAAmin)),
841 	    __DDR3_CYCLES(__DDR3_VALUE_PICO(s, tRCDmin)),
842 	    __DDR3_CYCLES(__DDR3_VALUE_PICO(s, tRPmin)),
843 	    __DDR3_CYCLES((s->sm_ddr3.ddr3_tRAS_msb * 256
844 		+ s->sm_ddr3.ddr3_tRAS_lsb) * s->sm_ddr3.ddr3_mtb_dividend
845 		/ s->sm_ddr3.ddr3_mtb_divisor * 1000));
846 
847 #undef	__DDR3_CYCLES
848 
849 	/* For DDR3, Voltage is written in another area */
850 	if (!s->sm_ddr3.ddr3_NOT15V || s->sm_ddr3.ddr3_135V
851 	    || s->sm_ddr3.ddr3_125V) {
852 		aprint_verbose("%s:", device_xname(self));
853 		if (!s->sm_ddr3.ddr3_NOT15V)
854 			aprint_verbose(" 1.5V");
855 		if (s->sm_ddr3.ddr3_135V)
856 			aprint_verbose(" 1.35V");
857 		if (s->sm_ddr3.ddr3_125V)
858 			aprint_verbose(" 1.25V");
859 		aprint_verbose(" operable\n");
860 	}
861 }
862 
863 static void
864 decode_fbdimm(const struct sysctlnode *node, device_t self, struct spdmem *s)
865 {
866 	int dimm_size, cycle_time, bits;
867 
868 	aprint_naive("\n");
869 	aprint_normal("\n");
870 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
871 
872 	/*
873 	 * FB-DIMM module size calculation is very much like DDR3
874 	 */
875 	dimm_size = s->sm_fbd.fbdimm_rows + 12 +
876 		    s->sm_fbd.fbdimm_cols +  9 - 20 - 3;
877 	dimm_size = (1 << dimm_size) * (1 << (s->sm_fbd.fbdimm_banks + 2));
878 
879 	cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
880 			    (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
881 		     s->sm_fbd.fbdimm_mtb_divisor;
882 	bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
883 	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
884 			  "PC2", 0);
885 
886 	aprint_verbose_dev(self,
887 	    "%d rows, %d cols, %d banks, %d.%02dns cycle time\n",
888 	    s->sm_fbd.fbdimm_rows, s->sm_fbd.fbdimm_cols,
889 	    1 << (s->sm_fbd.fbdimm_banks + 2),
890 	    cycle_time / 1000, (cycle_time % 1000 + 5) /10 );
891 
892 #define	__FBDIMM_CYCLES(field) (s->sm_fbd.field / s->sm_fbd.fbdimm_tCKmin)
893 
894 	aprint_verbose_dev(self, LATENCY, __FBDIMM_CYCLES(fbdimm_tAAmin),
895 	    __FBDIMM_CYCLES(fbdimm_tRCDmin), __FBDIMM_CYCLES(fbdimm_tRPmin),
896 	    (s->sm_fbd.fbdimm_tRAS_msb * 256 + s->sm_fbd.fbdimm_tRAS_lsb) /
897 	    s->sm_fbd.fbdimm_tCKmin);
898 
899 #undef	__FBDIMM_CYCLES
900 
901 	decode_voltage_refresh(self, s);
902 }
903 
904 static void
905 decode_ddr4(const struct sysctlnode *node, device_t self, struct spdmem *s)
906 {
907 	int dimm_size, cycle_time, ranks;
908 	int tAA_clocks, tRCD_clocks, tRP_clocks, tRAS_clocks;
909 
910 	aprint_naive("\n");
911 	print_part(s->sm_ddr4.ddr4_part_number,
912 	    sizeof(s->sm_ddr4.ddr4_part_number));
913 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
914 	if (s->sm_ddr4.ddr4_mod_type < __arraycount(spdmem_ddr4_module_types))
915 		aprint_normal(" (%s)",
916 		    spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type]);
917 	aprint_normal(", %sECC, %stemp-sensor, ",
918 		(s->sm_ddr4.ddr4_bus_width_extension) ? "" : "no ",
919 		(s->sm_ddr4.ddr4_has_therm_sensor) ? "" : "no ");
920 
921 	/*
922 	 * DDR4 size calculation from JEDEC spec
923 	 *
924 	 * Module capacity in bytes is defined as
925 	 *	Chip_Capacity_in_bits / 8bits-per-byte *
926 	 *	primary_bus_width / DRAM_width *
927 	 *	logical_ranks_per_DIMM
928 	 *
929 	 * logical_ranks_per_DIMM equals package_ranks, but multiply
930 	 * by diecount for 3DS packages
931 	 *
932 	 * We further divide by 2**20 to get our answer in MB
933 	 */
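	/*
	 * Illustrative example: an 8GB single-rank module built from 8Gbit
	 * x8 devices on a 64-bit bus (capacity = 5, primary_bus_width = 3,
	 * device_width = 1, package_ranks = 0) gives
	 * (5 + 28) - 20 - 3 + (3 + 3) - 3 = 13, i.e. (1 << 13) = 8192MB.
	 */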
934 	dimm_size = (s->sm_ddr4.ddr4_capacity + 28)	/* chip_capacity */
935 		     - 20				/* convert to MB */
936 		     - 3				/* bits --> bytes */
937 		     + (s->sm_ddr4.ddr4_primary_bus_width + 3); /* bus width */
938 	switch (s->sm_ddr4.ddr4_device_width) {		/* DRAM width */
939 	case 0:	dimm_size -= 2;
940 		break;
941 	case 1: dimm_size -= 3;
942 		break;
943 	case 2:	dimm_size -= 4;
944 		break;
945 	case 3: dimm_size -= 5;	/* x32; JEDEC encodes widths 0=x4 .. 3=x32 */
946 		break;
947 	default:
948 		dimm_size = -1;		/* flag invalid value */
949 	}
950 	if (dimm_size >= 0) {
951 		dimm_size = (1 << dimm_size) *
952 		    (s->sm_ddr4.ddr4_package_ranks + 1); /* log.ranks/DIMM */
953 		if (s->sm_ddr4.ddr4_signal_loading == 2) {
954 			dimm_size *= (s->sm_ddr4.ddr4_diecount + 1);
955 		}
956 	}
957 
958 /*
959  * Note that the ddr4_xxx_ftb fields are actually signed offsets from
960  * the corresponding mtb value, so we might have to subtract 256!
961  */
962 #define	__DDR4_VALUE(field) ((s->sm_ddr4.ddr4_##field##_mtb * 125 +	\
963 			     s->sm_ddr4.ddr4_##field##_ftb) - 		\
964 			    ((s->sm_ddr4.ddr4_##field##_ftb > 127)?256:0))
965 	/*
966 	 * For now, the only value for mtb is 0 = 125ps, and ftb = 1ps
967 	 * so we don't need to figure out the time-base units - just
968 	 * hard-code them for now.
969 	 */
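	/*
	 * For instance, a tCKAVGmin of 7 MTB units (875ps) with an FTB
	 * offset of -42 (stored as 256 - 42 = 214) works out to
	 * 7 * 125 + 214 - 256 = 833ps, i.e. DDR4-2400.
	 */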
970 	cycle_time = __DDR4_VALUE(tCKAVGmin);
971 	decode_size_speed(self, node, dimm_size, cycle_time, 2,
972 			  1 << (s->sm_ddr4.ddr4_primary_bus_width + 3),
973 			  TRUE, "PC4", 0);
974 
975 	ranks = s->sm_ddr4.ddr4_package_ranks + 1;
976 	aprint_verbose_dev(self,
977 	    "%d rows, %d cols, %d ranks%s, %d banks/group, %d bank groups\n",
978 	    s->sm_ddr4.ddr4_rows + 12, s->sm_ddr4.ddr4_cols + 9,
979 	    ranks, (ranks > 1) ? ((s->sm_ddr4.ddr4_rank_mix == 1)
980 		? " (asymmetric)" : " (symmetric)") : "",
981 	    1 << (2 + s->sm_ddr4.ddr4_logbanks),
982 	    1 << s->sm_ddr4.ddr4_bankgroups);
983 
984 	aprint_verbose_dev(self, "%d.%03dns cycle time\n",
985 	    cycle_time / 1000, cycle_time % 1000);
986 
987 	tAA_clocks =  __DDR4_VALUE(tAAmin)  * 1000 / cycle_time;
988 	tRCD_clocks = __DDR4_VALUE(tRCDmin) * 1000 / cycle_time;
989 	tRP_clocks =  __DDR4_VALUE(tRPmin)  * 1000 / cycle_time;
990 	tRAS_clocks = (s->sm_ddr4.ddr4_tRASmin_msb * 256 +
991 		       s->sm_ddr4.ddr4_tRASmin_lsb) * 125 * 1000 / cycle_time;
992 
993 /*
994  * Per JEDEC spec, rounding is done by taking the time value, dividing
995  * by the cycle time, subtracting .010 from the result, and then
996  * rounding up to the nearest integer.  Unfortunately, none of their
997  * examples say what to do when the result of the subtraction is already
998  * an integer.  For now, assume that we still round up (so an interval
999  * of exactly 12.010 clock cycles will be printed as 13).
1000  */
1001 #define	__DDR4_ROUND(value) ((value - 10) / 1000 + 1)
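/*
 * Example with illustrative figures: DDR4-2400 (cycle_time = 833) with
 * tAAmin = 13750ps gives tAA_clocks = 13750 * 1000 / 833 = 16506 and
 * __DDR4_ROUND(16506) = (16506 - 10) / 1000 + 1 = 17, i.e. CL17.
 */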
1002 
1003 	aprint_verbose_dev(self, LATENCY, __DDR4_ROUND(tAA_clocks),
1004 			   __DDR4_ROUND(tRCD_clocks),
1005 			   __DDR4_ROUND(tRP_clocks),
1006 			   __DDR4_ROUND(tRAS_clocks));
1007 
1008 #undef	__DDR4_VALUE
1009 #undef	__DDR4_ROUND
1010 }
1011