xref: /netbsd-src/sys/dev/ic/spdmem.c (revision 63aea4bd5b445e491ff0389fe27ec78b3099dba3)
1 /* $NetBSD: spdmem.c,v 1.19 2015/12/08 02:09:23 pgoyette Exp $ */
2 
3 /*
4  * Copyright (c) 2007 Nicolas Joly
5  * Copyright (c) 2007 Paul Goyette
6  * Copyright (c) 2007 Tobias Nygren
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Serial Presence Detect (SPD) memory identification
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: spdmem.c,v 1.19 2015/12/08 02:09:23 pgoyette Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/device.h>
42 #include <sys/endian.h>
43 #include <sys/sysctl.h>
44 #include <machine/bswap.h>
45 
46 #include <dev/i2c/i2cvar.h>
47 #include <dev/ic/spdmemreg.h>
48 #include <dev/ic/spdmemvar.h>
49 
50 /* Routines for decoding spd data */
51 static void decode_edofpm(const struct sysctlnode *, device_t, struct spdmem *);
52 static void decode_rom(const struct sysctlnode *, device_t, struct spdmem *);
53 static void decode_sdram(const struct sysctlnode *, device_t, struct spdmem *,
54 	int);
55 static void decode_ddr(const struct sysctlnode *, device_t, struct spdmem *);
56 static void decode_ddr2(const struct sysctlnode *, device_t, struct spdmem *);
57 static void decode_ddr3(const struct sysctlnode *, device_t, struct spdmem *);
58 static void decode_ddr4(const struct sysctlnode *, device_t, struct spdmem *);
59 static void decode_fbdimm(const struct sysctlnode *, device_t, struct spdmem *);
60 
61 static void decode_size_speed(device_t, const struct sysctlnode *,
62 			      int, int, int, int, bool, const char *, int);
63 static void decode_voltage_refresh(device_t, struct spdmem *);
64 
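/*
 * Rambus modules store an SPD revision code (a value less than 4) in
 * byte 0 instead of a data length; the macro below assumes a local
 * "struct spdmem *s" is in scope.
 */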
65 #define IS_RAMBUS_TYPE (s->sm_len < 4)
66 
67 static const char* const spdmem_basic_types[] = {
68 	"unknown",
69 	"FPM",
70 	"EDO",
71 	"Pipelined Nibble",
72 	"SDRAM",
73 	"ROM",
74 	"DDR SGRAM",
75 	"DDR SDRAM",
76 	"DDR2 SDRAM",
77 	"DDR2 SDRAM FB",
78 	"DDR2 SDRAM FB Probe",
79 	"DDR3 SDRAM",
80 	"DDR4 SDRAM"
81 };
82 
83 static const char* const spdmem_ddr4_module_types[] = {
84 	"DDR4 Extended",
85 	"DDR4 RDIMM",
86 	"DDR4 UDIMM",
87 	"DDR4 SO-DIMM",
88 	"DDR4 Load-Reduced DIMM",
89 	"DDR4 Mini-RDIMM",
90 	"DDR4 Mini-UDIMM",
91 	"DDR4 Reserved",
92 	"DDR4 72Bit SO-RDIMM",
93 	"DDR4 72Bit SO-UDIMM",
94 	"DDR4 Undefined",
95 	"DDR4 Reserved",
96 	"DDR4 16Bit SO-DIMM",
97 	"DDR4 32Bit SO-DIMM",
98 	"DDR4 Reserved",
99 	"DDR4 Undefined"
100 };
101 
102 static const char* const spdmem_superset_types[] = {
103 	"unknown",
104 	"ESDRAM",
105 	"DDR ESDRAM",
106 	"PEM EDO",
107 	"PEM SDRAM"
108 };
109 
110 static const char* const spdmem_voltage_types[] = {
111 	"TTL (5V tolerant)",
112 	"LvTTL (not 5V tolerant)",
113 	"HSTL 1.5V",
114 	"SSTL 3.3V",
115 	"SSTL 2.5V",
116 	"SSTL 1.8V"
117 };
118 
119 static const char* const spdmem_refresh_types[] = {
120 	"15.625us",
121 	"3.9us",
122 	"7.8us",
123 	"31.3us",
124 	"62.5us",
125 	"125us"
126 };
127 
128 static const char* const spdmem_parity_types[] = {
129 	"no parity or ECC",
130 	"data parity",
131 	"data ECC",
132 	"data parity and ECC",
133 	"cmd/addr parity",
134 	"cmd/addr/data parity",
135 	"cmd/addr parity, data ECC",
136 	"cmd/addr/data parity, data ECC"
137 };
138 
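/* DDR4 SPD "bytes used" sizes, indexed by the low nibble of SPD byte 0 */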
139 int spd_rom_sizes[] = { 0, 128, 256, 384, 512 };
140 
141 
142 /* Cycle time fractional values (units of .001 ns) for DDR and DDR2 SDRAM */
143 static const uint16_t spdmem_cycle_frac[] = {
144 	0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
145 	250, 333, 667, 750, 999, 999
146 };
147 
148 /* Format string for timing info */
149 #define	LATENCY	"tAA-tRCD-tRP-tRAS: %d-%d-%d-%d\n"
150 
151 /* CRC functions used for certain memory types */
152 
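/*
 * CRC-16 over SPD bytes 0 through 'count' inclusive, using the CCITT
 * polynomial 0x1021 with a zero initial value; this is the CRC that
 * the DDR3/DDR4 (and FB-DIMM) SPD formats store in bytes 126/127.
 */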
153 static uint16_t spdcrc16 (struct spdmem_softc *sc, int count)
154 {
155 	uint16_t crc;
156 	int i, j;
157 	uint8_t val;
158 	crc = 0;
159 	for (j = 0; j <= count; j++) {
160 		val = (sc->sc_read)(sc, j);
161 		crc = crc ^ (val << 8);
162 		for (i = 0; i < 8; ++i)
163 			if (crc & 0x8000)
164 				crc = crc << 1 ^ 0x1021;
165 			else
166 				crc = crc << 1;
167 	}
168 	return (crc & 0xFFFF);
169 }
170 
171 int
172 spdmem_common_probe(struct spdmem_softc *sc)
173 {
174 	int cksum = 0;
175 	uint8_t i, val, spd_type;
176 	int spd_len, spd_crc_cover;
177 	uint16_t crc_calc, crc_spd;
178 
179 	spd_type = (sc->sc_read)(sc, 2);
180 
181 	/* For older memory types, the checksum of bytes 0-62 must match byte 63 */
182 	if (spd_type <= SPDMEM_MEMTYPE_DDR2SDRAM) {
183 		for (i = 0; i < 63; i++)
184 			cksum += (sc->sc_read)(sc, i);
185 
186 		val = (sc->sc_read)(sc, 63);
187 
188 		if (cksum == 0 || (cksum & 0xff) != val) {
189 			aprint_debug("spd checksum failed, calc = 0x%02x, "
190 				     "spd = 0x%02x\n", cksum, val);
191 			return 0;
192 		} else
193 			return 1;
194 	}
195 
196 	/* For DDR3 and FBDIMM, verify the CRC */
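	/*
	 * For these types byte 0 also encodes the CRC coverage
	 * (bytes 0-116 or 0-125) and the SPD length; the expected
	 * CRC is stored little-endian in bytes 126-127.
	 */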
197 	else if (spd_type <= SPDMEM_MEMTYPE_DDR3SDRAM) {
198 		spd_len = (sc->sc_read)(sc, 0);
199 		if (spd_len & SPDMEM_SPDCRC_116)
200 			spd_crc_cover = 116;
201 		else
202 			spd_crc_cover = 125;
203 		switch (spd_len & SPDMEM_SPDLEN_MASK) {
204 		case SPDMEM_SPDLEN_128:
205 			spd_len = 128;
206 			break;
207 		case SPDMEM_SPDLEN_176:
208 			spd_len = 176;
209 			break;
210 		case SPDMEM_SPDLEN_256:
211 			spd_len = 256;
212 			break;
213 		default:
214 			return 0;
215 		}
216 		if (spd_crc_cover > spd_len)
217 			return 0;
218 		crc_calc = spdcrc16(sc, spd_crc_cover);
219 		crc_spd = (sc->sc_read)(sc, 127) << 8;
220 		crc_spd |= (sc->sc_read)(sc, 126);
221 		if (crc_calc != crc_spd) {
222 			aprint_debug("crc16 failed, covers %d bytes, "
223 				     "calc = 0x%04x, spd = 0x%04x\n",
224 				     spd_crc_cover, crc_calc, crc_spd);
225 			return 0;
226 		}
227 		return 1;
228 	} else if (spd_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
229 		spd_len = (sc->sc_read)(sc, 0) & 0x0f;
230 		if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
231 			return 0;
232 		spd_len = spd_rom_sizes[spd_len];
233 		spd_crc_cover = 125; /* For byte 0 to 125 */
234 		if (spd_crc_cover > spd_len)
235 			return 0;
236 		crc_calc = spdcrc16(sc, spd_crc_cover);
237 		crc_spd = (sc->sc_read)(sc, 127) << 8;
238 		crc_spd |= (sc->sc_read)(sc, 126);
239 		if (crc_calc != crc_spd) {
240 			aprint_debug("crc16 failed, covers %d bytes, "
241 				     "calc = 0x%04x, spd = 0x%04x\n",
242 				     spd_crc_cover, crc_calc, crc_spd);
243 			return 0;
244 		}
245 		/*
246 		 * We probably could also verify the CRC for the other
247 		 * "pages" of SPD data in blocks 1 and 2, but we'll do
248 		 * it some other time.
249 		 */
250 		return 1;
251 	}
252 
254 	/* For unrecognized memory types, don't match at all */
255 	return 0;
256 }
257 
258 void
259 spdmem_common_attach(struct spdmem_softc *sc, device_t self)
260 {
261 	struct spdmem *s = &(sc->sc_spd_data);
262 	const char *type;
263 	const char *rambus_rev = "Reserved";
264 	int dimm_size;
265 	unsigned int i, spd_len, spd_size;
266 	const struct sysctlnode *node = NULL;
267 
268 	s->sm_len = (sc->sc_read)(sc, 0);
269 	s->sm_size = (sc->sc_read)(sc, 1);
270 	s->sm_type = (sc->sc_read)(sc, 2);
271 
272 	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
273 		/*
274 		 * An even newer encoding with one byte holding both
275 		 * the used-size and capacity values
276 		 */
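		/*
		 * Illustrative example: a byte 0 value of 0x23 has a
		 * low nibble of 3, selecting spd_rom_sizes[3] = 384
		 * bytes used; bits 6:4 (here 2) are scaled by 512
		 * below and serve only as an upper bound on spd_len.
		 */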
277 		spd_len = s->sm_len & 0x0f;
278 		spd_size = (s->sm_len >> 4) & 0x07;
279 
280 		spd_len = spd_rom_sizes[spd_len];
281 		spd_size *= 512;
282 
283 	} else if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
284 		/*
285 		 * FBDIMM and DDR3 (and probably all newer) have a different
286 		 * encoding of the SPD EEPROM used/total sizes
287 		 */
288 		spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
289 		switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
290 		case SPDMEM_SPDLEN_128:
291 			spd_len = 128;
292 			break;
293 		case SPDMEM_SPDLEN_176:
294 			spd_len = 176;
295 			break;
296 		case SPDMEM_SPDLEN_256:
297 			spd_len = 256;
298 			break;
299 		default:
300 			spd_len = 64;
301 			break;
302 		}
303 	} else {
304 		spd_size = 1 << s->sm_size;
305 		spd_len = s->sm_len;
306 		if (spd_len < 64)
307 			spd_len = 64;
308 	}
309 	if (spd_len > spd_size)
310 		spd_len = spd_size;
311 	if (spd_len > sizeof(struct spdmem))
312 		spd_len = sizeof(struct spdmem);
313 	for (i = 3; i < spd_len; i++)
314 		((uint8_t *)s)[i] = (sc->sc_read)(sc, i);
315 
316 	/*
317 	 * Set up our sysctl subtree, hw.spdmemN
318 	 */
319 	sc->sc_sysctl_log = NULL;
320 	sysctl_createv(&sc->sc_sysctl_log, 0, NULL, &node,
321 	    0, CTLTYPE_NODE,
322 	    device_xname(self), NULL, NULL, 0, NULL, 0,
323 	    CTL_HW, CTL_CREATE, CTL_EOL);
324 	if (node != NULL && spd_len != 0)
325 		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
326 		    0,
327 		    CTLTYPE_STRUCT, "spd_data",
328 		    SYSCTL_DESCR("raw spd data"), NULL,
329 		    0, s, spd_len,
330 		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
331 
332 	/*
333 	 * Decode and print key SPD contents
334 	 */
335 	if (IS_RAMBUS_TYPE) {
336 		if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
337 			type = "Rambus";
338 		else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
339 			type = "Direct Rambus";
340 		else
341 			type = "Rambus (unknown)";
342 
343 		switch (s->sm_len) {
344 		case 0:
345 			rambus_rev = "Invalid";
346 			break;
347 		case 1:
348 			rambus_rev = "0.7";
349 			break;
350 		case 2:
351 			rambus_rev = "1.0";
352 			break;
353 		default:
354 			rambus_rev = "Reserved";
355 			break;
356 		}
357 	} else {
358 		if (s->sm_type < __arraycount(spdmem_basic_types))
359 			type = spdmem_basic_types[s->sm_type];
360 		else
361 			type = "unknown memory type";
362 
363 		if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
364 		    s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
365 			type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
366 		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
367 		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
368 			type = spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
369 		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
370 		    s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
371 			type =
372 			    spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
373 		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
374 		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
375 			type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
376 		}
377 		if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM &&
378 		    s->sm_ddr4.ddr4_mod_type <
379 				__arraycount(spdmem_ddr4_module_types)) {
380 			type = spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type];
381 		}
382 	}
383 
384 	strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);
385 	if (node != NULL)
386 		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
387 		    0,
388 		    CTLTYPE_STRING, "mem_type",
389 		    SYSCTL_DESCR("memory module type"), NULL,
390 		    0, sc->sc_type, 0,
391 		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
392 
393 	if (IS_RAMBUS_TYPE) {
394 		aprint_naive("\n");
395 		aprint_normal("\n");
396 		aprint_normal_dev(self, "%s, SPD Revision %s", type, rambus_rev);
397 		dimm_size = 1 << (s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13);
398 		if (dimm_size >= 1024)
399 			aprint_normal(", %dGB\n", dimm_size / 1024);
400 		else
401 			aprint_normal(", %dMB\n", dimm_size);
402 
403 		/* No further decode for RAMBUS memory */
404 		return;
405 	}
406 	switch (s->sm_type) {
407 	case SPDMEM_MEMTYPE_EDO:
408 	case SPDMEM_MEMTYPE_FPM:
409 		decode_edofpm(node, self, s);
410 		break;
411 	case SPDMEM_MEMTYPE_ROM:
412 		decode_rom(node, self, s);
413 		break;
414 	case SPDMEM_MEMTYPE_SDRAM:
415 		decode_sdram(node, self, s, spd_len);
416 		break;
417 	case SPDMEM_MEMTYPE_DDRSDRAM:
418 		decode_ddr(node, self, s);
419 		break;
420 	case SPDMEM_MEMTYPE_DDR2SDRAM:
421 		decode_ddr2(node, self, s);
422 		break;
423 	case SPDMEM_MEMTYPE_DDR3SDRAM:
424 		decode_ddr3(node, self, s);
425 		break;
426 	case SPDMEM_MEMTYPE_FBDIMM:
427 	case SPDMEM_MEMTYPE_FBDIMM_PROBE:
428 		decode_fbdimm(node, self, s);
429 		break;
430 	case SPDMEM_MEMTYPE_DDR4SDRAM:
431 		decode_ddr4(node, self, s);
432 		break;
433 	}
434 
435 	/* Dump SPD */
436 	for (i = 0; i < spd_len;  i += 16) {
437 		unsigned int j, k;
438 		aprint_debug_dev(self, "0x%02x:", i);
439 		k = (spd_len > (i + 16)) ? i + 16 : spd_len;
440 		for (j = i; j < k; j++)
441 			aprint_debug(" %02x", ((uint8_t *)s)[j]);
442 		aprint_debug("\n");
443 	}
444 }
445 
446 int
447 spdmem_common_detach(struct spdmem_softc *sc, device_t self)
448 {
449 	sysctl_teardown(&sc->sc_sysctl_log);
450 
451 	return 0;
452 }
453 
454 static void
455 decode_size_speed(device_t self, const struct sysctlnode *node,
456 		  int dimm_size, int cycle_time, int d_clk, int bits,
457 		  bool round, const char *ddr_type_string, int speed)
458 {
459 	int p_clk;
460 	struct spdmem_softc *sc = device_private(self);
461 
462 	if (dimm_size < 1024)
463 		aprint_normal("%dMB", dimm_size);
464 	else
465 		aprint_normal("%dGB", dimm_size / 1024);
466 	if (node != NULL)
467 		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
468 		    CTLFLAG_IMMEDIATE,
469 		    CTLTYPE_INT, "size",
470 		    SYSCTL_DESCR("module size in MB"), NULL,
471 		    dimm_size, NULL, 0,
472 		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
473 
474 	if (cycle_time == 0) {
475 		aprint_normal("\n");
476 		return;
477 	}
478 
479 	/*
480 	 * Calculate p_clk first, since for DDR3 we need maximum significance.
481 	 * DDR3 rating is not rounded to a multiple of 100.  This results in
482 	 * cycle_time of 1.5ns displayed as PC3-10666.
483 	 *
484 	 * For SDRAM, the speed is provided by the caller so we use it.
485 	 */
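	/*
	 * Worked example (illustrative): for a 64-bit DDR3 module with
	 * cycle_time = 1500 (1.5ns), d_clk = 2 and bits = 64, the code
	 * below gives p_clk = 2000000 * 64 / 8 / 1500 = 10666 (hence
	 * PC3-10666) and d_clk = (2000000 + 750) / 1500 = 1333 MHz.
	 */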
486 	d_clk *= 1000 * 1000;
487 	if (speed)
488 		p_clk = speed;
489 	else
490 		p_clk = (d_clk * bits) / 8 / cycle_time;
491 	d_clk = (d_clk + cycle_time / 2) / cycle_time;
492 	if (round) {
493 		if ((p_clk % 100) >= 50)
494 			p_clk += 50;
495 		p_clk -= p_clk % 100;
496 	}
497 	aprint_normal(", %dMHz (%s-%d)\n",
498 		      d_clk, ddr_type_string, p_clk);
499 	if (node != NULL)
500 		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
501 			       CTLFLAG_IMMEDIATE,
502 			       CTLTYPE_INT, "speed",
503 			       SYSCTL_DESCR("memory speed in MHz"),
504 			       NULL, d_clk, NULL, 0,
505 			       CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
506 }
507 
508 static void
509 decode_voltage_refresh(device_t self, struct spdmem *s)
510 {
511 	const char *voltage, *refresh;
512 
513 	if (s->sm_voltage < __arraycount(spdmem_voltage_types))
514 		voltage = spdmem_voltage_types[s->sm_voltage];
515 	else
516 		voltage = "unknown";
517 
518 	if (s->sm_refresh < __arraycount(spdmem_refresh_types))
519 		refresh = spdmem_refresh_types[s->sm_refresh];
520 	else
521 		refresh = "unknown";
522 
523 	aprint_verbose_dev(self, "voltage %s, refresh time %s%s\n",
524 			voltage, refresh,
525 			s->sm_selfrefresh?" (self-refreshing)":"");
526 }
527 
528 static void
529 decode_edofpm(const struct sysctlnode *node, device_t self, struct spdmem *s)
530 {
531 
532 	aprint_naive("\n");
533 	aprint_normal("\n");
534 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
535 
536 	aprint_normal("\n");
537 	aprint_verbose_dev(self,
538 	    "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
539 	    s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols, s->sm_fpm.fpm_banks,
540 	    s->sm_fpm.fpm_tRAC, s->sm_fpm.fpm_tCAC);
541 }
542 
543 static void
544 decode_rom(const struct sysctlnode *node, device_t self, struct spdmem *s)
545 {
546 
547 	aprint_naive("\n");
548 	aprint_normal("\n");
549 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
550 
551 	aprint_normal("\n");
552 	aprint_verbose_dev(self, "%d rows, %d cols, %d banks\n",
553 	    s->sm_rom.rom_rows, s->sm_rom.rom_cols, s->sm_rom.rom_banks);
554 }
555 
556 static void
557 decode_sdram(const struct sysctlnode *node, device_t self, struct spdmem *s,
558 	     int spd_len)
559 {
560 	int dimm_size, cycle_time, bits, tAA, i, speed, freq;
561 
562 	aprint_naive("\n");
563 	aprint_normal("\n");
564 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
565 
566 	aprint_normal("%s, %s, ",
567 		(s->sm_sdr.sdr_mod_attrs & SPDMEM_SDR_MASK_REG)?
568 			" (registered)":"",
569 		(s->sm_config < __arraycount(spdmem_parity_types))?
570 			spdmem_parity_types[s->sm_config]:"invalid parity");
571 
572 	dimm_size = 1 << (s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17);
573 	dimm_size *= s->sm_sdr.sdr_banks * s->sm_sdr.sdr_banks_per_chip;
574 
575 	cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
576 		     s->sm_sdr.sdr_cycle_tenths * 100;
577 	bits = le16toh(s->sm_sdr.sdr_datawidth);
578 	if (s->sm_config == 1 || s->sm_config == 2)
579 		bits -= 8;
580 
581 	/* Calculate speed here - from OpenBSD */
582 	if (spd_len >= 128)
583 		freq = ((uint8_t *)s)[126];
584 	else
585 		freq = 0;
586 	switch (freq) {
587 		/*
588 		 * Must check cycle time since some PC-133 DIMMs
589 		 * actually report PC-100
590 		 */
591 	    case 100:
592 	    case 133:
593 		if (cycle_time < 8000)
594 			speed = 133;
595 		else
596 			speed = 100;
597 		break;
598 	    case 0x66:		/* Legacy DIMMs use _hex_ 66! */
599 	    default:
600 		speed = 66;
601 	}
602 	decode_size_speed(self, node, dimm_size, cycle_time, 1, bits, FALSE,
603 			  "PC", speed);
604 
605 	aprint_verbose_dev(self,
606 	    "%d rows, %d cols, %d banks, %d banks/chip, %d.%dns cycle time\n",
607 	    s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols, s->sm_sdr.sdr_banks,
608 	    s->sm_sdr.sdr_banks_per_chip, cycle_time/1000,
609 	    (cycle_time % 1000) / 100);
610 
611 	tAA  = 0;
612 	for (i = 0; i < 8; i++)
613 		if (s->sm_sdr.sdr_tCAS & (1 << i))
614 			tAA = i;
615 	tAA++;
616 	aprint_verbose_dev(self, LATENCY, tAA, s->sm_sdr.sdr_tRCD,
617 	    s->sm_sdr.sdr_tRP, s->sm_sdr.sdr_tRAS);
618 
619 	decode_voltage_refresh(self, s);
620 }
621 
622 static void
623 decode_ddr(const struct sysctlnode *node, device_t self, struct spdmem *s)
624 {
625 	int dimm_size, cycle_time, bits, tAA, i;
626 
627 	aprint_naive("\n");
628 	aprint_normal("\n");
629 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
630 
631 	aprint_normal("%s, %s, ",
632 		(s->sm_ddr.ddr_mod_attrs & SPDMEM_DDR_MASK_REG)?
633 			" (registered)":"",
634 		(s->sm_config < __arraycount(spdmem_parity_types))?
635 			spdmem_parity_types[s->sm_config]:"invalid parity");
636 
637 	dimm_size = 1 << (s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17);
638 	dimm_size *= s->sm_ddr.ddr_ranks * s->sm_ddr.ddr_banks_per_chip;
639 
640 	cycle_time = s->sm_ddr.ddr_cycle_whole * 1000 +
641 		  spdmem_cycle_frac[s->sm_ddr.ddr_cycle_tenths];
642 	bits = le16toh(s->sm_ddr.ddr_datawidth);
643 	if (s->sm_config == 1 || s->sm_config == 2)
644 		bits -= 8;
645 	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
646 			  "PC", 0);
647 
648 	aprint_verbose_dev(self,
649 	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%dns cycle time\n",
650 	    s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols, s->sm_ddr.ddr_ranks,
651 	    s->sm_ddr.ddr_banks_per_chip, cycle_time/1000,
652 	    (cycle_time % 1000 + 50) / 100);
653 
654 	tAA  = 0;
655 	for (i = 2; i < 8; i++)
656 		if (s->sm_ddr.ddr_tCAS & (1 << i))
657 			tAA = i;
658 	tAA /= 2;
659 
660 #define __DDR_ROUND(scale, field)	\
661 		((scale * s->sm_ddr.field + cycle_time - 1) / cycle_time)
662 
663 	aprint_verbose_dev(self, LATENCY, tAA, __DDR_ROUND(250, ddr_tRCD),
664 		__DDR_ROUND(250, ddr_tRP), __DDR_ROUND(1000, ddr_tRAS));
665 
666 #undef	__DDR_ROUND
667 
668 	decode_voltage_refresh(self, s);
669 }
670 
671 static void
672 decode_ddr2(const struct sysctlnode *node, device_t self, struct spdmem *s)
673 {
674 	int dimm_size, cycle_time, bits, tAA, i;
675 
676 	aprint_naive("\n");
677 	aprint_normal("\n");
678 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
679 
680 	aprint_normal("%s, %s, ",
681 		(s->sm_ddr2.ddr2_mod_attrs & SPDMEM_DDR2_MASK_REG)?
682 			" (registered)":"",
683 		(s->sm_config < __arraycount(spdmem_parity_types))?
684 			spdmem_parity_types[s->sm_config]:"invalid parity");
685 
686 	dimm_size = 1 << (s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17);
687 	dimm_size *= (s->sm_ddr2.ddr2_ranks + 1) *
688 		     s->sm_ddr2.ddr2_banks_per_chip;
689 
690 	cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 +
691 		 spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac];
692 	bits = s->sm_ddr2.ddr2_datawidth;
693 	if ((s->sm_config & 0x03) != 0)
694 		bits -= 8;
695 	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
696 			  "PC2", 0);
697 
698 	aprint_verbose_dev(self,
699 	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%02dns cycle time\n",
700 	    s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols,
701 	    s->sm_ddr2.ddr2_ranks + 1, s->sm_ddr2.ddr2_banks_per_chip,
702 	    cycle_time / 1000, (cycle_time % 1000 + 5) /10 );
703 
704 	tAA  = 0;
705 	for (i = 2; i < 8; i++)
706 		if (s->sm_ddr2.ddr2_tCAS & (1 << i))
707 			tAA = i;
708 
709 #define __DDR2_ROUND(scale, field)	\
710 		((scale * s->sm_ddr2.field + cycle_time - 1) / cycle_time)
711 
712 	aprint_verbose_dev(self, LATENCY, tAA, __DDR2_ROUND(250, ddr2_tRCD),
713 		__DDR2_ROUND(250, ddr2_tRP), __DDR2_ROUND(1000, ddr2_tRAS));
714 
715 #undef	__DDR2_ROUND
716 
717 	decode_voltage_refresh(self, s);
718 }
719 
720 static void
721 decode_ddr3(const struct sysctlnode *node, device_t self, struct spdmem *s)
722 {
723 	int dimm_size, cycle_time, bits;
724 
725 	aprint_naive("\n");
726 	aprint_normal(": %.18s\n", s->sm_ddr3.ddr3_part);
727 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
728 
729 	if (s->sm_ddr3.ddr3_mod_type ==
730 		SPDMEM_DDR3_TYPE_MINI_RDIMM ||
731 	    s->sm_ddr3.ddr3_mod_type == SPDMEM_DDR3_TYPE_RDIMM)
732 		aprint_normal(" (registered)");
733 	aprint_normal(", %sECC, %stemp-sensor, ",
734 		(s->sm_ddr3.ddr3_hasECC)?"":"no ",
735 		(s->sm_ddr3.ddr3_has_therm_sensor)?"":"no ");
736 
737 	/*
738 	 * DDR3 size specification is quite different from others
739 	 *
740 	 * Module capacity is defined as
741 	 *	Chip_Capacity_in_bits / 8bits-per-byte *
742 	 *	external_bus_width / internal_bus_width
743 	 * We further divide by 2**20 to get our answer in MB
744 	 */
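	/*
	 * Illustrative example (assuming the usual JEDEC encodings): a
	 * single-rank 64-bit module built from 4Gbit x8 chips has
	 * ddr3_chipsize = 4, ddr3_datawidth = 3, ddr3_chipwidth = 1 and
	 * ddr3_physbanks = 0, so dimm_size below works out to 12 and
	 * the module is reported as 1 << 12 = 4096 MB.
	 */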
745 	dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
746 		    (s->sm_ddr3.ddr3_datawidth + 3) -
747 		    (s->sm_ddr3.ddr3_chipwidth + 2);
748 	dimm_size = (1 << dimm_size) * (s->sm_ddr3.ddr3_physbanks + 1);
749 
750 	cycle_time = (1000 * s->sm_ddr3.ddr3_mtb_dividend +
751 			    (s->sm_ddr3.ddr3_mtb_divisor / 2)) /
752 		     s->sm_ddr3.ddr3_mtb_divisor;
753 	cycle_time *= s->sm_ddr3.ddr3_tCKmin;
754 	bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
755 	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, FALSE,
756 			  "PC3", 0);
757 
758 	aprint_verbose_dev(self,
759 	    "%d rows, %d cols, %d log. banks, %d phys. banks, "
760 	    "%d.%03dns cycle time\n",
761 	    s->sm_ddr3.ddr3_rows + 9, s->sm_ddr3.ddr3_cols + 12,
762 	    1 << (s->sm_ddr3.ddr3_logbanks + 3),
763 	    s->sm_ddr3.ddr3_physbanks + 1,
764 	    cycle_time/1000, cycle_time % 1000);
765 
766 #define	__DDR3_CYCLES(field) (s->sm_ddr3.field / s->sm_ddr3.ddr3_tCKmin)
767 
768 	aprint_verbose_dev(self, LATENCY, __DDR3_CYCLES(ddr3_tAAmin),
769 		__DDR3_CYCLES(ddr3_tRCDmin), __DDR3_CYCLES(ddr3_tRPmin),
770 		(s->sm_ddr3.ddr3_tRAS_msb * 256 + s->sm_ddr3.ddr3_tRAS_lsb) /
771 		    s->sm_ddr3.ddr3_tCKmin);
772 
773 #undef	__DDR3_CYCLES
774 
775 	/* For DDR3, Voltage is written in another area */
776 	if (!s->sm_ddr3.ddr3_NOT15V || s->sm_ddr3.ddr3_135V
777 	    || s->sm_ddr3.ddr3_125V) {
778 		aprint_verbose("%s:", device_xname(self));
779 		if (!s->sm_ddr3.ddr3_NOT15V)
780 			aprint_verbose(" 1.5V");
781 		if (s->sm_ddr3.ddr3_135V)
782 			aprint_verbose(" 1.35V");
783 		if (s->sm_ddr3.ddr3_125V)
784 			aprint_verbose(" 1.25V");
785 		aprint_verbose(" operable\n");
786 	}
787 }
788 
789 static void
790 decode_fbdimm(const struct sysctlnode *node, device_t self, struct spdmem *s)
791 {
792 	int dimm_size, cycle_time, bits;
793 
794 	aprint_naive("\n");
795 	aprint_normal("\n");
796 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
797 
798 	/*
799 	 * FB-DIMM module size calculation is very much like DDR3
800 	 */
801 	dimm_size = s->sm_fbd.fbdimm_rows + 12 +
802 		    s->sm_fbd.fbdimm_cols +  9 - 20 - 3;
803 	dimm_size = (1 << dimm_size) * (1 << (s->sm_fbd.fbdimm_banks + 2));
804 
805 	cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
806 			    (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
807 		     s->sm_fbd.fbdimm_mtb_divisor;
808 	bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
809 	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
810 			  "PC2", 0);
811 
812 	aprint_verbose_dev(self,
813 	    "%d rows, %d cols, %d banks, %d.%02dns cycle time\n",
814 	    s->sm_fbd.fbdimm_rows, s->sm_fbd.fbdimm_cols,
815 	    1 << (s->sm_fbd.fbdimm_banks + 2),
816 	    cycle_time / 1000, (cycle_time % 1000 + 5) /10 );
817 
818 #define	__FBDIMM_CYCLES(field) (s->sm_fbd.field / s->sm_fbd.fbdimm_tCKmin)
819 
820 	aprint_verbose_dev(self, LATENCY, __FBDIMM_CYCLES(fbdimm_tAAmin),
821 		__FBDIMM_CYCLES(fbdimm_tRCDmin), __FBDIMM_CYCLES(fbdimm_tRPmin),
822 		(s->sm_fbd.fbdimm_tRAS_msb * 256 + s->sm_fbd.fbdimm_tRAS_lsb) /
823 		    s->sm_fbd.fbdimm_tCKmin);
824 
825 #undef	__FBDIMM_CYCLES
826 
827 	decode_voltage_refresh(self, s);
828 }
829 
830 static void
831 decode_ddr4(const struct sysctlnode *node, device_t self, struct spdmem *s)
832 {
833 	int dimm_size, cycle_time;
834 	int tAA_clocks, tRCD_clocks, tRP_clocks, tRAS_clocks;
835 
836 	aprint_naive("\n");
837 	aprint_normal(": %.20s\n", s->sm_ddr4.ddr4_part_number);
838 	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
839 	if (s->sm_ddr4.ddr4_mod_type < __arraycount(spdmem_ddr4_module_types))
840 		aprint_normal(" (%s)",
841 		    spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type]);
842 	aprint_normal(", %stemp-sensor, ",
843 		(s->sm_ddr4.ddr4_has_therm_sensor)?"":"no ");
844 
845 	/*
846 	 * DDR4 size calculation from JEDEC spec
847 	 *
848 	 * Module capacity in bytes is defined as
849 	 *	Chip_Capacity_in_bits / 8bits-per-byte *
850 	 *	primary_bus_width / DRAM_width *
851 	 *	logical_ranks_per_DIMM
852 	 *
853 	 * logical_ranks_per_DIMM equals package_ranks, but multiply
854 	 * by diecount for 3DS packages
855 	 *
856 	 * We further divide by 2**20 to get our answer in MB
857 	 */
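	/*
	 * Illustrative example (assuming the usual JEDEC encodings): a
	 * single-rank 64-bit module built from 8Gbit x8 devices has
	 * ddr4_capacity = 5, ddr4_primary_bus_width = 3,
	 * ddr4_device_width = 1 and ddr4_package_ranks = 0, giving
	 * dimm_size = (5 + 28) - 20 - 3 + (3 + 3) - 3 = 13, reported
	 * as 1 << 13 = 8192 MB.
	 */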
858 	dimm_size = (s->sm_ddr4.ddr4_capacity + 28)	/* chip_capacity */
859 		     - 20				/* convert to MB */
860 		     - 3				/* bits --> bytes */
861 		     + (s->sm_ddr4.ddr4_primary_bus_width + 3); /* bus width */
862 	switch (s->sm_ddr4.ddr4_device_width) {		/* DRAM width */
863 	case 0:	dimm_size -= 2;		/* x4 devices */
864 		break;
865 	case 1: dimm_size -= 3;		/* x8 devices */
866 		break;
867 	case 2:	dimm_size -= 4;		/* x16 devices */
868 		break;
869 	case 3: dimm_size -= 5;		/* x32 devices */
870 		break;
871 	default:
872 		dimm_size = -1;		/* flag invalid value */
873 	}
874 	if (dimm_size >= 0) {
875 		dimm_size = (1 << dimm_size) *
876 		    (s->sm_ddr4.ddr4_package_ranks + 1); /* log.ranks/DIMM */
877 		if (s->sm_ddr4.ddr4_signal_loading == 2) {
878 			dimm_size *= (s->sm_ddr4.ddr4_diecount + 1);
879 		}
880 	}
881 
882 #define	__DDR4_VALUE(field) ((s->sm_ddr4.ddr4_##field##_mtb * 125 +	\
883 			     s->sm_ddr4.ddr4_##field##_ftb) - 		\
884 			    ((s->sm_ddr4.ddr4_##field##_ftb > 127)?256:0))
885 	/*
886 	 * For now, the only value for mtb is 1 = 125ps, and ftb = 1ps
887 	 * so we don't need to figure out the time-base units - just
888 	 * hard-code them for now.
889 	 */
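	/*
	 * Illustrative example: a DDR4-2400 part typically encodes
	 * tCKAVGmin as 7 MTB units (875 ps) with a fine-offset byte of
	 * 214 (i.e. -42), so __DDR4_VALUE yields 875 + 214 - 256 = 833 ps.
	 */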
890 	cycle_time = __DDR4_VALUE(tCKAVGmin);
891 	aprint_normal("%d.%03dns cycle time (%dMHz), ", cycle_time/1000,
892 	    cycle_time % 1000, 1000000 / cycle_time);
893 
894 	decode_size_speed(self, node, dimm_size, cycle_time, 2,
895 			  1 << (s->sm_ddr4.ddr4_primary_bus_width + 3),
896 			  TRUE, "PC4", 0);
897 
898 	aprint_verbose_dev(self,
899 	    "%d rows, %d cols, %d banks, %d bank groups\n",
900 	    s->sm_ddr4.ddr4_rows + 9, s->sm_ddr4.ddr4_cols + 12,
901 	    1 << (2 + s->sm_ddr4.ddr4_logbanks),
902 	    1 << s->sm_ddr4.ddr4_bankgroups);
903 
904 /*
905  * Note that the ddr4_xxx_ftb fields are actually signed offsets from
906  * the corresponding mtb value, so we might have to subtract 256!
907  */
908 
909 	tAA_clocks =  __DDR4_VALUE(tAAmin)  * 1000 / cycle_time;
910 	tRCD_clocks = __DDR4_VALUE(tRCDmin) * 1000 / cycle_time;
911 	tRP_clocks =  __DDR4_VALUE(tRPmin)  * 1000 / cycle_time;
912 	tRAS_clocks = (s->sm_ddr4.ddr4_tRASmin_msb * 256 +
913 		       s->sm_ddr4.ddr4_tRASmin_lsb) * 125 * 1000 / cycle_time;
914 
915 /*
916  * Per JEDEC spec, rounding is done by taking the time value, dividing
917  * by the cycle time, subtracting .010 from the result, and then
918  * rounding up to the nearest integer.  Unfortunately, none of their
919  * examples say what to do when the result of the subtraction is already
920  * an integer.  For now, assume that we still round up (so an interval
921  * of exactly 12.010 clock cycles will be printed as 13).
922  */
923 #define	__DDR4_ROUND(value) ((value - 10) / 1000 + 1)
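/*
 * For example, __DDR4_ROUND(16506) (an interval of 16.506 clocks)
 * evaluates to (16506 - 10) / 1000 + 1 = 17.
 */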
924 
925 	aprint_verbose_dev(self, LATENCY, __DDR4_ROUND(tAA_clocks),
926 			   __DDR4_ROUND(tRCD_clocks),
927 			   __DDR4_ROUND(tRP_clocks),
928 			   __DDR4_ROUND(tRAS_clocks));
929 
930 #undef	__DDR4_VALUE
931 #undef	__DDR4_ROUND
932 }
933