/* $NetBSD: spdmem.c,v 1.14 2015/05/15 08:44:24 msaitoh Exp $ */

/*
 * Copyright (c) 2007 Nicolas Joly
 * Copyright (c) 2007 Paul Goyette
 * Copyright (c) 2007 Tobias Nygren
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Serial Presence Detect (SPD) memory identification
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spdmem.c,v 1.14 2015/05/15 08:44:24 msaitoh Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <machine/bswap.h>

#include <dev/i2c/i2cvar.h>
#include <dev/ic/spdmemreg.h>
#include <dev/ic/spdmemvar.h>

/* Routines for decoding spd data */
static void decode_edofpm(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_rom(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_sdram(const struct sysctlnode *, device_t, struct spdmem *,
	int);
static void decode_ddr(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr2(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr3(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr4(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_fbdimm(const struct sysctlnode *, device_t, struct spdmem *);

static void decode_size_speed(device_t, const struct sysctlnode *,
			      int, int, int, int, bool, const char *, int);
static void decode_voltage_refresh(device_t, struct spdmem *);

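/*
 * Byte 0 of the SPD holds a small revision code on Rambus modules but
 * EEPROM size information (a larger value) on JEDEC modules, so a value
 * below 4 is taken to mean Rambus.  The macro expects a local variable
 * "s" pointing at the SPD data.
 */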
#define IS_RAMBUS_TYPE (s->sm_len < 4)

static const char* const spdmem_basic_types[] = {
	"unknown",
	"FPM",
	"EDO",
	"Pipelined Nibble",
	"SDRAM",
	"ROM",
	"DDR SGRAM",
	"DDR SDRAM",
	"DDR2 SDRAM",
	"DDR2 SDRAM FB",
	"DDR2 SDRAM FB Probe",
	"DDR3 SDRAM",
	"DDR4 SDRAM"
};

static const char* const spdmem_ddr4_module_types[] = {
	"DDR4 Extended",
	"DDR4 RDIMM",
	"DDR4 UDIMM",
	"DDR4 SO-DIMM",
	"DDR4 Load-Reduced DIMM",
	"DDR4 Mini-RDIMM",
	"DDR4 Mini-UDIMM",
	"DDR4 Reserved",
	"DDR4 72Bit SO-RDIMM",
	"DDR4 72Bit SO-UDIMM",
	"DDR4 Undefined",
	"DDR4 Reserved",
	"DDR4 16Bit SO-DIMM",
	"DDR4 32Bit SO-DIMM",
	"DDR4 Reserved",
	"DDR4 Undefined"
};

static const char* const spdmem_superset_types[] = {
	"unknown",
	"ESDRAM",
	"DDR ESDRAM",
	"PEM EDO",
	"PEM SDRAM"
};

static const char* const spdmem_voltage_types[] = {
	"TTL (5V tolerant)",
	"LvTTL (not 5V tolerant)",
	"HSTL 1.5V",
	"SSTL 3.3V",
	"SSTL 2.5V",
	"SSTL 1.8V"
};

static const char* const spdmem_refresh_types[] = {
	"15.625us",
	"3.9us",
	"7.8us",
	"31.3us",
	"62.5us",
	"125us"
};

static const char* const spdmem_parity_types[] = {
	"no parity or ECC",
	"data parity",
	"data ECC",
	"data parity and ECC",
	"cmd/addr parity",
	"cmd/addr/data parity",
	"cmd/addr parity, data ECC",
	"cmd/addr/data parity, data ECC"
};

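/*
 * Number of SPD bytes used on DDR4 modules, indexed by the "SPD bytes
 * used" code in the low nibble of SPD byte 0.
 */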
int spd_rom_sizes[] = { 0, 128, 256, 384, 512 };


/*
 * Cycle time fractional values (units of .001 ns) for DDR and DDR2 SDRAM.
 * Codes 0-9 are tenths of a ns; codes 10-13 encode 0.25, 1/3, 2/3 and
 * 0.75 ns; codes 14 and 15 are undefined and mapped to 0.999 ns here.
 */
static const uint16_t spdmem_cycle_frac[] = {
	0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
	250, 333, 667, 750, 999, 999
};

/* Format string for timing info */
#define	LATENCY	"tAA-tRCD-tRP-tRAS: %d-%d-%d-%d\n"

/* CRC functions used for certain memory types */

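/*
 * SPD checksums for DDR3/FBDIMM/DDR4 use CRC-16 with the CCITT
 * polynomial x^16 + x^12 + x^5 + 1 (0x1021) and an initial value of 0,
 * computed over bytes 0..count inclusive.
 */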
static uint16_t
spdcrc16(struct spdmem_softc *sc, int count)
{
	uint16_t crc;
	int i, j;
	uint8_t val;
	crc = 0;
	for (j = 0; j <= count; j++) {
		val = (sc->sc_read)(sc, j);
		crc = crc ^ val << 8;
		for (i = 0; i < 8; ++i)
			if (crc & 0x8000)
				crc = crc << 1 ^ 0x1021;
			else
				crc = crc << 1;
	}
	return (crc & 0xFFFF);
}

int
spdmem_common_probe(struct spdmem_softc *sc)
{
	int cksum = 0;
	uint8_t i, val, spd_type;
	int spd_len, spd_crc_cover;
	uint16_t crc_calc, crc_spd;

	spd_type = (sc->sc_read)(sc, 2);

	/* For older memory types, validate the checksum over 1st 63 bytes */
	if (spd_type <= SPDMEM_MEMTYPE_DDR2SDRAM) {
		for (i = 0; i < 63; i++)
			cksum += (sc->sc_read)(sc, i);

		val = (sc->sc_read)(sc, 63);

		if (cksum == 0 || (cksum & 0xff) != val) {
			aprint_debug("spd checksum failed, calc = 0x%02x, "
				     "spd = 0x%02x\n", cksum, val);
			return 0;
		} else
			return 1;
	}

	/* For DDR3 and FBDIMM, verify the CRC */
	else if (spd_type <= SPDMEM_MEMTYPE_DDR3SDRAM) {
		spd_len = (sc->sc_read)(sc, 0);
		if (spd_len & SPDMEM_SPDCRC_116)
			spd_crc_cover = 116;
		else
			spd_crc_cover = 125;
		switch (spd_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			return 0;
		}
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		crc_spd = (sc->sc_read)(sc, 127) << 8;
		crc_spd |= (sc->sc_read)(sc, 126);
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
				     "calc = 0x%04x, spd = 0x%04x\n",
				     spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		return 1;
	} else if (spd_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		spd_len = (sc->sc_read)(sc, 0) & 0x0f;
		if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
			return 0;
		spd_len = spd_rom_sizes[spd_len];
		/* The DDR4 base configuration CRC covers bytes 0-125 */
		spd_crc_cover = 125;
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		crc_spd = (sc->sc_read)(sc, 127) << 8;
		crc_spd |= (sc->sc_read)(sc, 126);
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
				     "calc = 0x%04x, spd = 0x%04x\n",
				     spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		/*
		 * We probably could also verify the CRC for the other
		 * "pages" of SPD data in blocks 1 and 2, but we'll do
		 * it some other time.
		 */
		return 1;
	}

	/* For unrecognized memory types, don't match at all */
	return 0;
}

void
spdmem_common_attach(struct spdmem_softc *sc, device_t self)
{
	struct spdmem *s = &(sc->sc_spd_data);
	const char *type;
	const char *rambus_rev = "Reserved";
	int dimm_size;
	unsigned int i, spd_len, spd_size;
	const struct sysctlnode *node = NULL;

	s->sm_len = (sc->sc_read)(sc, 0);
	s->sm_size = (sc->sc_read)(sc, 1);
	s->sm_type = (sc->sc_read)(sc, 2);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * An even newer encoding with one byte holding both
		 * the used-size and capacity values
		 */
		spd_len = s->sm_len & 0x0f;
		spd_size = (s->sm_len >> 4) & 0x07;

		spd_len = spd_rom_sizes[spd_len];
		spd_size *= 512;

	} else if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
		/*
		 * FBDIMM and DDR3 (and probably all newer) have a different
		 * encoding of the SPD EEPROM used/total sizes
		 */
		spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
		switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			spd_len = 64;
			break;
		}
	} else {
		spd_size = 1 << s->sm_size;
		spd_len = s->sm_len;
		if (spd_len < 64)
			spd_len = 64;
	}
	if (spd_len > spd_size)
		spd_len = spd_size;
	if (spd_len > sizeof(struct spdmem))
		spd_len = sizeof(struct spdmem);
	for (i = 3; i < spd_len; i++)
		((uint8_t *)s)[i] = (sc->sc_read)(sc, i);

	/*
	 * Setup our sysctl subtree, hw.spdmemN
	 */
	sc->sc_sysctl_log = NULL;
	sysctl_createv(&sc->sc_sysctl_log, 0, NULL, &node,
	    0, CTLTYPE_NODE,
	    device_xname(self), NULL, NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL);
	if (node != NULL && spd_len != 0)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRUCT, "spd_data",
		    SYSCTL_DESCR("raw spd data"), NULL,
		    0, s, spd_len,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	/*
	 * Decode and print key SPD contents
	 */
	if (IS_RAMBUS_TYPE) {
		if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
			type = "Rambus";
		else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
			type = "Direct Rambus";
		else
			type = "Rambus (unknown)";

		switch (s->sm_len) {
		case 0:
			rambus_rev = "Invalid";
			break;
		case 1:
			rambus_rev = "0.7";
			break;
		case 2:
			rambus_rev = "1.0";
			break;
		default:
			rambus_rev = "Reserved";
			break;
		}
	} else {
		if (s->sm_type < __arraycount(spdmem_basic_types))
			type = spdmem_basic_types[s->sm_type];
		else
			type = "unknown memory type";

		if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
		    s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
		    s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
			type =
			    spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
			type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
		}
		if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM &&
		    s->sm_ddr4.ddr4_mod_type <
				__arraycount(spdmem_ddr4_module_types)) {
			type = spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type];
		}
	}

	strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRING, "mem_type",
		    SYSCTL_DESCR("memory module type"), NULL,
		    0, sc->sc_type, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (IS_RAMBUS_TYPE) {
		aprint_naive("\n");
		aprint_normal("\n");
		aprint_normal_dev(self, "%s, SPD Revision %s", type, rambus_rev);
		dimm_size = 1 << (s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13);
		if (dimm_size >= 1024)
			aprint_normal(", %dGB\n", dimm_size / 1024);
		else
			aprint_normal(", %dMB\n", dimm_size);

		/* No further decode for RAMBUS memory */
		return;
	}
	switch (s->sm_type) {
	case SPDMEM_MEMTYPE_EDO:
	case SPDMEM_MEMTYPE_FPM:
		decode_edofpm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_ROM:
		decode_rom(node, self, s);
		break;
	case SPDMEM_MEMTYPE_SDRAM:
		decode_sdram(node, self, s, spd_len);
		break;
	case SPDMEM_MEMTYPE_DDRSDRAM:
		decode_ddr(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR2SDRAM:
		decode_ddr2(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR3SDRAM:
		decode_ddr3(node, self, s);
		break;
	case SPDMEM_MEMTYPE_FBDIMM:
	case SPDMEM_MEMTYPE_FBDIMM_PROBE:
		decode_fbdimm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR4SDRAM:
		decode_ddr4(node, self, s);
		break;
	}

	/* Dump SPD */
	for (i = 0; i < spd_len; i += 16) {
		unsigned int j, k;
		aprint_debug_dev(self, "0x%02x:", i);
		k = (spd_len > (i + 16)) ? i + 16 : spd_len;
		for (j = i; j < k; j++)
			aprint_debug(" %02x", ((uint8_t *)s)[j]);
		aprint_debug("\n");
	}
}

int
spdmem_common_detach(struct spdmem_softc *sc, device_t self)
{
	sysctl_teardown(&sc->sc_sysctl_log);

	return 0;
}

static void
decode_size_speed(device_t self, const struct sysctlnode *node,
		  int dimm_size, int cycle_time, int d_clk, int bits,
		  bool round, const char *ddr_type_string, int speed)
{
	int p_clk;
	struct spdmem_softc *sc = device_private(self);

	if (dimm_size < 1024)
		aprint_normal("%dMB", dimm_size);
	else
		aprint_normal("%dGB", dimm_size / 1024);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "size",
		    SYSCTL_DESCR("module size in MB"), NULL,
		    dimm_size, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (cycle_time == 0) {
		aprint_normal("\n");
		return;
	}

	/*
	 * Calculate p_clk first, since for DDR3 we need maximum significance.
	 * DDR3 rating is not rounded to a multiple of 100.  This results in
	 * cycle_time of 1.5ns displayed as PC3-10666.
	 *
	 * For SDRAM, the speed is provided by the caller so we use it.
	 */
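	/*
	 * Example: DDR3 (d_clk 2) with cycle_time 1500 (1.5ns) and a
	 * 64-bit bus gives p_clk = (2000000 * 64) / 8 / 1500 = 10666
	 * (PC3-10666) and d_clk = (2000000 + 750) / 1500 = 1333MHz.
	 */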
	d_clk *= 1000 * 1000;
	if (speed)
		p_clk = speed;
	else
		p_clk = (d_clk * bits) / 8 / cycle_time;
	d_clk = (d_clk + cycle_time / 2) / cycle_time;
	if (round) {
		if ((p_clk % 100) >= 50)
			p_clk += 50;
		p_clk -= p_clk % 100;
	}
	aprint_normal(", %dMHz (%s-%d)\n",
		      d_clk, ddr_type_string, p_clk);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
			       CTLFLAG_IMMEDIATE,
			       CTLTYPE_INT, "speed",
			       SYSCTL_DESCR("memory speed in MHz"),
			       NULL, d_clk, NULL, 0,
			       CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
}

static void
decode_voltage_refresh(device_t self, struct spdmem *s)
{
	const char *voltage, *refresh;

	if (s->sm_voltage < __arraycount(spdmem_voltage_types))
		voltage = spdmem_voltage_types[s->sm_voltage];
	else
		voltage = "unknown";

	if (s->sm_refresh < __arraycount(spdmem_refresh_types))
		refresh = spdmem_refresh_types[s->sm_refresh];
	else
		refresh = "unknown";

	aprint_verbose_dev(self, "voltage %s, refresh time %s%s\n",
			voltage, refresh,
			s->sm_selfrefresh?" (self-refreshing)":"");
}

static void
decode_edofpm(const struct sysctlnode *node, device_t self, struct spdmem *s) {
	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
	    s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols, s->sm_fpm.fpm_banks,
	    s->sm_fpm.fpm_tRAC, s->sm_fpm.fpm_tCAC);
}

static void
decode_rom(const struct sysctlnode *node, device_t self, struct spdmem *s) {
	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self, "%d rows, %d cols, %d banks\n",
	    s->sm_rom.rom_rows, s->sm_rom.rom_cols, s->sm_rom.rom_banks);
}

static void
decode_sdram(const struct sysctlnode *node, device_t self, struct spdmem *s,
	     int spd_len) {
	int dimm_size, cycle_time, bits, tAA, i, speed, freq;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
		(s->sm_sdr.sdr_mod_attrs & SPDMEM_SDR_MASK_REG)?
			" (registered)":"",
		(s->sm_config < __arraycount(spdmem_parity_types))?
			spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17);
	dimm_size *= s->sm_sdr.sdr_banks * s->sm_sdr.sdr_banks_per_chip;

	cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
		     s->sm_sdr.sdr_cycle_tenths * 100;
	bits = le16toh(s->sm_sdr.sdr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;

	/* Calculate speed here - from OpenBSD */
	if (spd_len >= 128)
		freq = ((uint8_t *)s)[126];
	else
		freq = 0;
	switch (freq) {
		/*
		 * Must check cycle time since some PC-133 DIMMs
		 * actually report PC-100
		 */
	    case 100:
	    case 133:
		if (cycle_time < 8000)
			speed = 133;
		else
			speed = 100;
		break;
	    case 0x66:		/* Legacy DIMMs use _hex_ 66! */
	    default:
		speed = 66;
	}
	decode_size_speed(self, node, dimm_size, cycle_time, 1, bits, FALSE,
			  "PC", speed);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols, s->sm_sdr.sdr_banks,
	    s->sm_sdr.sdr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000) / 100);

	tAA  = 0;
	for (i = 0; i < 8; i++)
		if (s->sm_sdr.sdr_tCAS & (1 << i))
			tAA = i;
	tAA++;
	aprint_verbose_dev(self, LATENCY, tAA, s->sm_sdr.sdr_tRCD,
	    s->sm_sdr.sdr_tRP, s->sm_sdr.sdr_tRAS);

	decode_voltage_refresh(self, s);
}

static void
decode_ddr(const struct sysctlnode *node, device_t self, struct spdmem *s) {
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
		(s->sm_ddr.ddr_mod_attrs & SPDMEM_DDR_MASK_REG)?
			" (registered)":"",
		(s->sm_config < __arraycount(spdmem_parity_types))?
			spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17);
	dimm_size *= s->sm_ddr.ddr_ranks * s->sm_ddr.ddr_banks_per_chip;

	cycle_time = s->sm_ddr.ddr_cycle_whole * 1000 +
		  spdmem_cycle_frac[s->sm_ddr.ddr_cycle_tenths];
	bits = le16toh(s->sm_ddr.ddr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
			  "PC", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols, s->sm_ddr.ddr_ranks,
	    s->sm_ddr.ddr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000 + 50) / 100);

	tAA  = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr.ddr_tCAS & (1 << i))
			tAA = i;
	tAA /= 2;

#define __DDR_ROUND(scale, field)	\
		((scale * s->sm_ddr.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR_ROUND(250, ddr_tRCD),
		__DDR_ROUND(250, ddr_tRP), __DDR_ROUND(1000, ddr_tRAS));

#undef	__DDR_ROUND

	decode_voltage_refresh(self, s);
}

static void
decode_ddr2(const struct sysctlnode *node, device_t self, struct spdmem *s) {
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
		(s->sm_ddr2.ddr2_mod_attrs & SPDMEM_DDR2_MASK_REG)?
			" (registered)":"",
		(s->sm_config < __arraycount(spdmem_parity_types))?
			spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17);
	dimm_size *= (s->sm_ddr2.ddr2_ranks + 1) *
		     s->sm_ddr2.ddr2_banks_per_chip;

	cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 +
		 spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac];
	bits = s->sm_ddr2.ddr2_datawidth;
	if ((s->sm_config & 0x03) != 0)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
			  "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%02dns cycle time\n",
	    s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols,
	    s->sm_ddr2.ddr2_ranks + 1, s->sm_ddr2.ddr2_banks_per_chip,
	    cycle_time / 1000, (cycle_time % 1000 + 5) /10 );

	tAA  = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr2.ddr2_tCAS & (1 << i))
			tAA = i;

#define __DDR2_ROUND(scale, field)	\
		((scale * s->sm_ddr2.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR2_ROUND(250, ddr2_tRCD),
		__DDR2_ROUND(250, ddr2_tRP), __DDR2_ROUND(1000, ddr2_tRAS));

#undef	__DDR2_ROUND

	decode_voltage_refresh(self, s);
}

static void
decode_ddr3(const struct sysctlnode *node, device_t self, struct spdmem *s) {
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal(": %18s\n", s->sm_ddr3.ddr3_part);
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	if (s->sm_ddr3.ddr3_mod_type ==
		SPDMEM_DDR3_TYPE_MINI_RDIMM ||
	    s->sm_ddr3.ddr3_mod_type == SPDMEM_DDR3_TYPE_RDIMM)
		aprint_normal(" (registered)");
	aprint_normal(", %sECC, %stemp-sensor, ",
		(s->sm_ddr3.ddr3_hasECC)?"":"no ",
		(s->sm_ddr3.ddr3_has_therm_sensor)?"":"no ");

	/*
	 * DDR3 size specification is quite different from others
	 *
	 * Module capacity is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	external_bus_width / internal_bus_width
	 * We further divide by 2**20 to get our answer in MB
	 */
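	/*
	 * Example: 2Gbit chips (ddr3_chipsize 3 -> 2^31 bits), a 64-bit
	 * bus (ddr3_datawidth 3 -> 2^6) and x8 devices (ddr3_chipwidth 1
	 * -> 2^3) give an exponent of (31 - 20) - 3 + 6 - 3 = 11, i.e.
	 * 2048MB, before the (ddr3_physbanks + 1) multiplier below.
	 */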
	dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
		    (s->sm_ddr3.ddr3_datawidth + 3) -
		    (s->sm_ddr3.ddr3_chipwidth + 2);
	dimm_size = (1 << dimm_size) * (s->sm_ddr3.ddr3_physbanks + 1);

	cycle_time = (1000 * s->sm_ddr3.ddr3_mtb_dividend +
			    (s->sm_ddr3.ddr3_mtb_divisor / 2)) /
		     s->sm_ddr3.ddr3_mtb_divisor;
	cycle_time *= s->sm_ddr3.ddr3_tCKmin;
	bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, FALSE,
			  "PC3", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d log. banks, %d phys. banks, "
	    "%d.%03dns cycle time\n",
	    s->sm_ddr3.ddr3_rows + 9, s->sm_ddr3.ddr3_cols + 12,
	    1 << (s->sm_ddr3.ddr3_logbanks + 3),
	    s->sm_ddr3.ddr3_physbanks + 1,
	    cycle_time/1000, cycle_time % 1000);

#define	__DDR3_CYCLES(field) (s->sm_ddr3.field / s->sm_ddr3.ddr3_tCKmin)

	aprint_verbose_dev(self, LATENCY, __DDR3_CYCLES(ddr3_tAAmin),
		__DDR3_CYCLES(ddr3_tRCDmin), __DDR3_CYCLES(ddr3_tRPmin),
		(s->sm_ddr3.ddr3_tRAS_msb * 256 + s->sm_ddr3.ddr3_tRAS_lsb) /
		    s->sm_ddr3.ddr3_tCKmin);

#undef	__DDR3_CYCLES

	/* For DDR3, Voltage is written in another area */
	if (!s->sm_ddr3.ddr3_NOT15V || s->sm_ddr3.ddr3_135V
	    || s->sm_ddr3.ddr3_125V) {
		aprint_verbose("%s:", device_xname(self));
		if (!s->sm_ddr3.ddr3_NOT15V)
			aprint_verbose(" 1.5V");
		if (s->sm_ddr3.ddr3_135V)
			aprint_verbose(" 1.35V");
		if (s->sm_ddr3.ddr3_125V)
			aprint_verbose(" 1.25V");
		aprint_verbose(" operable\n");
	}
}

static void
decode_fbdimm(const struct sysctlnode *node, device_t self, struct spdmem *s) {
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	/*
	 * FB-DIMM module size calculation is very much like DDR3
	 */
	dimm_size = s->sm_fbd.fbdimm_rows + 12 +
		    s->sm_fbd.fbdimm_cols +  9 - 20 - 3;
	dimm_size = (1 << dimm_size) * (1 << (s->sm_fbd.fbdimm_banks + 2));

	cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
			    (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
		     s->sm_fbd.fbdimm_mtb_divisor;
	bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
			  "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d.%02dns cycle time\n",
	    s->sm_fbd.fbdimm_rows, s->sm_fbd.fbdimm_cols,
	    1 << (s->sm_fbd.fbdimm_banks + 2),
	    cycle_time / 1000, (cycle_time % 1000 + 5) /10 );

#define	__FBDIMM_CYCLES(field) (s->sm_fbd.field / s->sm_fbd.fbdimm_tCKmin)

	aprint_verbose_dev(self, LATENCY, __FBDIMM_CYCLES(fbdimm_tAAmin),
		__FBDIMM_CYCLES(fbdimm_tRCDmin), __FBDIMM_CYCLES(fbdimm_tRPmin),
		(s->sm_fbd.fbdimm_tRAS_msb * 256 +
			s->sm_fbd.fbdimm_tRAS_lsb) /
		    s->sm_fbd.fbdimm_tCKmin);

#undef	__FBDIMM_CYCLES

	decode_voltage_refresh(self, s);
}

static void
decode_ddr4(const struct sysctlnode *node, device_t self, struct spdmem *s) {
	int dimm_size, cycle_time;
	int tAA_clocks, tRCD_clocks, tRP_clocks, tRAS_clocks;

	aprint_naive("\n");
	aprint_normal(": %20s\n", s->sm_ddr4.ddr4_part_number);
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
	if (s->sm_ddr4.ddr4_mod_type < __arraycount(spdmem_ddr4_module_types))
		aprint_normal(" (%s)",
		    spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type]);
	aprint_normal(", %stemp-sensor, ",
		(s->sm_ddr4.ddr4_has_therm_sensor)?"":"no ");

	/*
	 * DDR4 size calculation from JEDEC spec
	 *
	 * Module capacity in bytes is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	primary_bus_width / DRAM_width *
	 *	logical_ranks_per_DIMM
	 *
	 * logical_ranks_per_DIMM equals package_ranks, but multiply
	 * by diecount for 3DS packages
	 *
	 * We further divide by 2**20 to get our answer in MB
	 */
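	/*
	 * Example: 4Gbit chips (ddr4_capacity 4 -> 2^32 bits), a 64-bit
	 * primary bus (ddr4_primary_bus_width 3) and x8 devices
	 * (ddr4_device_width 1) give (32 - 20) - 3 + 6 - 3 = 12, i.e.
	 * 4096MB, before the package-rank (and die-count) multipliers.
	 */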
	dimm_size = (s->sm_ddr4.ddr4_capacity + 28)	/* chip_capacity */
		     - 20				/* convert to MB */
		     - 3				/* bits --> bytes */
		     + (s->sm_ddr4.ddr4_primary_bus_width + 3); /* bus width */
	switch (s->sm_ddr4.ddr4_device_width) {		/* DRAM width */
	case 0:	dimm_size -= 2;		/* x4 */
		break;
	case 1: dimm_size -= 3;		/* x8 */
		break;
	case 2:	dimm_size -= 4;		/* x16 */
		break;
	case 3: dimm_size -= 5;		/* x32 */
		break;
	default:
		dimm_size = -1;		/* flag invalid value */
	}
	if (dimm_size >= 0) {
		dimm_size = (1 << dimm_size) *
		    (s->sm_ddr4.ddr4_package_ranks + 1); /* log.ranks/DIMM */
		if (s->sm_ddr4.ddr4_signal_loading == 2) {
			dimm_size *= s->sm_ddr4.ddr4_diecount;
		}
	}

	/*
	 * For now, the only defined value for mtb is 1 = 125ps, and ftb = 1ps,
	 * so we don't need to figure out the time-base units - just
	 * hard-code them for now.
	 */
	cycle_time = 125 * s->sm_ddr4.ddr4_tCKAVGmin_mtb +
			   s->sm_ddr4.ddr4_tCKAVGmin_ftb;
	aprint_normal("%d MB, %d.%03dns cycle time (%dMHz)\n", dimm_size,
	    cycle_time/1000, cycle_time % 1000, 1000000 / cycle_time);

	decode_size_speed(self, node, dimm_size, cycle_time, 2,
			  1 << (s->sm_ddr4.ddr4_device_width + 3),
			  TRUE, "PC4", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d bank groups\n",
	    s->sm_ddr3.ddr3_rows + 9, s->sm_ddr3.ddr3_cols + 12,
	    1 << (2 + s->sm_ddr4.ddr4_logbanks),
	    1 << s->sm_ddr4.ddr4_bankgroups);

/*
 * Note that the ddr4_xxx_ftb fields are actually signed offsets from
 * the corresponding mtb value, so we might have to subtract 256!
 */
/* Result is in ps: mtb units are 125ps, ftb units (signed) are 1ps */
#define	__DDR4_VALUE(field) ((s->sm_ddr4.ddr4_##field##_mtb * 125 +	\
			      s->sm_ddr4.ddr4_##field##_ftb) -		\
			      ((s->sm_ddr4.ddr4_##field##_ftb > 127)?256:0))

	tAA_clocks =  (__DDR4_VALUE(tAAmin)  * 1000 ) / cycle_time;
	tRP_clocks =  (__DDR4_VALUE(tRPmin)  * 1000 ) / cycle_time;
	tRCD_clocks = (__DDR4_VALUE(tRCDmin) * 1000 ) / cycle_time;
	tRAS_clocks = (s->sm_ddr4.ddr4_tRASmin_msb * 256 +
		       s->sm_ddr4.ddr4_tRASmin_lsb) * 125 * 1000 / cycle_time;

/*
 * Per JEDEC spec, rounding is done by taking the time value, dividing
 * by the cycle time, subtracting .010 from the result, and then
 * rounding up to the nearest integer.  Unfortunately, none of their
 * examples say what to do when the result of the subtraction is already
 * an integer.  For now, assume that we still round up (so an interval
 * of exactly 12.010 clock cycles will be printed as 13).
 */
#define	__DDR4_ROUND(value) ((value - 10) / 1000 + 1)
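/*
 * Example: tAAmin = 13750ps at a 1250ps (DDR4-1600) clock gives
 * tAA_clocks = 11000, and (11000 - 10) / 1000 + 1 = 11, so an exact
 * multiple of the clock is not rounded up an extra cycle.
 */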

	aprint_verbose_dev(self, LATENCY, __DDR4_ROUND(tAA_clocks),
			   __DDR4_ROUND(tRCD_clocks),
			   __DDR4_ROUND(tRP_clocks),
			   __DDR4_ROUND(tRAS_clocks));

#undef	__DDR4_VALUE
#undef	__DDR4_ROUND
}