/* $NetBSD: spdmem.c,v 1.21 2016/01/05 11:49:32 msaitoh Exp $ */

/*
 * Copyright (c) 2007 Nicolas Joly
 * Copyright (c) 2007 Paul Goyette
 * Copyright (c) 2007 Tobias Nygren
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Serial Presence Detect (SPD) memory identification
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spdmem.c,v 1.21 2016/01/05 11:49:32 msaitoh Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <machine/bswap.h>

#include <dev/i2c/i2cvar.h>
#include <dev/ic/spdmemreg.h>
#include <dev/ic/spdmemvar.h>

/* Routines for decoding spd data */
static void decode_edofpm(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_rom(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_sdram(const struct sysctlnode *, device_t, struct spdmem *,
	int);
static void decode_ddr(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr2(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr3(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr4(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_fbdimm(const struct sysctlnode *, device_t, struct spdmem *);

static void decode_size_speed(device_t, const struct sysctlnode *,
			      int, int, int, int, bool, const char *, int);
static void decode_voltage_refresh(device_t, struct spdmem *);

#define IS_RAMBUS_TYPE (s->sm_len < 4)

static const char* const spdmem_basic_types[] = {
	"unknown",
	"FPM",
	"EDO",
	"Pipelined Nibble",
	"SDRAM",
	"ROM",
	"DDR SGRAM",
	"DDR SDRAM",
	"DDR2 SDRAM",
	"DDR2 SDRAM FB",
	"DDR2 SDRAM FB Probe",
	"DDR3 SDRAM",
	"DDR4 SDRAM",
	"unknown",
	"DDR4E SDRAM",
	"LPDDR3 SDRAM",
	"LPDDR4 SDRAM"
};

static const char* const spdmem_ddr4_module_types[] = {
	"DDR4 Extended",
	"DDR4 RDIMM",
	"DDR4 UDIMM",
	"DDR4 SO-DIMM",
	"DDR4 Load-Reduced DIMM",
	"DDR4 Mini-RDIMM",
	"DDR4 Mini-UDIMM",
	"DDR4 Reserved",
	"DDR4 72Bit SO-RDIMM",
	"DDR4 72Bit SO-UDIMM",
	"DDR4 Undefined",
	"DDR4 Reserved",
	"DDR4 16Bit SO-DIMM",
	"DDR4 32Bit SO-DIMM",
	"DDR4 Reserved",
	"DDR4 Undefined"
};

static const char* const spdmem_superset_types[] = {
	"unknown",
	"ESDRAM",
	"DDR ESDRAM",
	"PEM EDO",
	"PEM SDRAM"
};

static const char* const spdmem_voltage_types[] = {
	"TTL (5V tolerant)",
	"LvTTL (not 5V tolerant)",
	"HSTL 1.5V",
	"SSTL 3.3V",
	"SSTL 2.5V",
	"SSTL 1.8V"
};

static const char* const spdmem_refresh_types[] = {
	"15.625us",
	"3.9us",
	"7.8us",
	"31.3us",
	"62.5us",
	"125us"
};

static const char* const spdmem_parity_types[] = {
	"no parity or ECC",
	"data parity",
	"data ECC",
	"data parity and ECC",
	"cmd/addr parity",
	"cmd/addr/data parity",
	"cmd/addr parity, data ECC",
	"cmd/addr/data parity, data ECC"
};

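/*
 * SPD EEPROM sizes in bytes, indexed by the low nibble of SPD byte 0
 * (the DDR4 "SPD bytes used" field); index 0 means "undefined".
 */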
int spd_rom_sizes[] = { 0, 128, 256, 384, 512 };


/* Cycle time fractional values (units of .001 ns) for DDR and DDR2 SDRAM */
static const uint16_t spdmem_cycle_frac[] = {
	0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
	250, 333, 667, 750, 999, 999
};

/* Format string for timing info */
#define	LATENCY	"tAA-tRCD-tRP-tRAS: %d-%d-%d-%d\n"

/* CRC functions used for certain memory types */

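/*
 * Compute a CRC-16 (CCITT polynomial 0x1021, initial value 0) over SPD
 * bytes 0 through "count" inclusive; the result is compared against the
 * little-endian CRC stored in SPD bytes 126/127 of DDR3, FB-DIMM and
 * DDR4 modules.
 */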
static uint16_t
spdcrc16(struct spdmem_softc *sc, int count)
{
	uint16_t crc;
	int i, j;
	uint8_t val;
	crc = 0;
	for (j = 0; j <= count; j++) {
		(sc->sc_read)(sc, j, &val);
		crc = crc ^ val << 8;
		for (i = 0; i < 8; ++i)
			if (crc & 0x8000)
				crc = crc << 1 ^ 0x1021;
			else
				crc = crc << 1;
	}
	return (crc & 0xFFFF);
}

int
spdmem_common_probe(struct spdmem_softc *sc)
{
	int cksum = 0;
	uint8_t i, val, spd_type;
	int spd_len, spd_crc_cover;
	uint16_t crc_calc, crc_spd;

	/* Read failed means a device doesn't exist */
	if ((sc->sc_read)(sc, 2, &spd_type) != 0)
		return 0;

	/* For older memory types, validate the checksum over 1st 63 bytes */
	if (spd_type <= SPDMEM_MEMTYPE_DDR2SDRAM) {
		for (i = 0; i < 63; i++) {
			(sc->sc_read)(sc, i, &val);
			cksum += val;
		}

		(sc->sc_read)(sc, 63, &val);

		if ((cksum & 0xff) != val) {
			aprint_debug("spd checksum failed, calc = 0x%02x, "
				     "spd = 0x%02x\n", cksum, val);
			return 0;
		} else
			return 1;
	}

	/* For DDR3 and FBDIMM, verify the CRC */
	else if (spd_type <= SPDMEM_MEMTYPE_DDR3SDRAM) {
		(sc->sc_read)(sc, 0, &val);
		spd_len = val;
		if (spd_len & SPDMEM_SPDCRC_116)
			spd_crc_cover = 116;
		else
			spd_crc_cover = 125;
		switch (spd_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			return 0;
		}
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		(sc->sc_read)(sc, 127, &val);
		crc_spd = val << 8;
		(sc->sc_read)(sc, 126, &val);
		crc_spd |= val;
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
				     "calc = 0x%04x, spd = 0x%04x\n",
				     spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		return 1;
	} else if (spd_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		(sc->sc_read)(sc, 0, &val);
		spd_len = val & 0x0f;
		if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
			return 0;
		spd_len = spd_rom_sizes[spd_len];
		spd_crc_cover = 125; /* For byte 0 to 125 */
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		(sc->sc_read)(sc, 127, &val);
		crc_spd = val << 8;
		(sc->sc_read)(sc, 126, &val);
		crc_spd |= val;
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
				     "calc = 0x%04x, spd = 0x%04x\n",
				     spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		/*
		 * We probably could also verify the CRC for the other
		 * "pages" of SPD data in blocks 1 and 2, but we'll do
		 * it some other time.
		 */
		return 1;
	} else
		return 0;

	/* For unrecognized memory types, don't match at all */
	return 0;
}

void
spdmem_common_attach(struct spdmem_softc *sc, device_t self)
{
	struct spdmem *s = &(sc->sc_spd_data);
	const char *type;
	const char *rambus_rev = "Reserved";
	int dimm_size;
	unsigned int i, spd_len, spd_size;
	const struct sysctlnode *node = NULL;

	(sc->sc_read)(sc, 0, &s->sm_len);
	(sc->sc_read)(sc, 1, &s->sm_size);
	(sc->sc_read)(sc, 2, &s->sm_type);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * An even newer encoding with one byte holding both
		 * the used-size and capacity values
		 */
		spd_len = s->sm_len & 0x0f;
		spd_size = (s->sm_len >> 4) & 0x07;

		spd_len = spd_rom_sizes[spd_len];
		spd_size *= 512;

	} else if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
		/*
		 * FBDIMM and DDR3 (and probably all newer) have a different
		 * encoding of the SPD EEPROM used/total sizes
		 */
		spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
		switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			spd_len = 64;
			break;
		}
	} else {
		spd_size = 1 << s->sm_size;
		spd_len = s->sm_len;
		if (spd_len < 64)
			spd_len = 64;
	}
	if (spd_len > spd_size)
		spd_len = spd_size;
	if (spd_len > sizeof(struct spdmem))
		spd_len = sizeof(struct spdmem);
	for (i = 3; i < spd_len; i++)
		(sc->sc_read)(sc, i, &((uint8_t *)s)[i]);

	/*
	 * Setup our sysctl subtree, hw.spdmemN
	 */
	sc->sc_sysctl_log = NULL;
	sysctl_createv(&sc->sc_sysctl_log, 0, NULL, &node,
	    0, CTLTYPE_NODE,
	    device_xname(self), NULL, NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL);
	if (node != NULL && spd_len != 0)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRUCT, "spd_data",
		    SYSCTL_DESCR("raw spd data"), NULL,
		    0, s, spd_len,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	/*
	 * Decode and print key SPD contents
	 */
	if (IS_RAMBUS_TYPE) {
		if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
			type = "Rambus";
		else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
			type = "Direct Rambus";
		else
			type = "Rambus (unknown)";

		switch (s->sm_len) {
		case 0:
			rambus_rev = "Invalid";
			break;
		case 1:
			rambus_rev = "0.7";
			break;
		case 2:
			rambus_rev = "1.0";
			break;
		default:
			rambus_rev = "Reserved";
			break;
		}
	} else {
		if (s->sm_type < __arraycount(spdmem_basic_types))
			type = spdmem_basic_types[s->sm_type];
		else
			type = "unknown memory type";

		if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
		    s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
		    s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
			type =
			    spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
			type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
		}
		if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM &&
		    s->sm_ddr4.ddr4_mod_type <
				__arraycount(spdmem_ddr4_module_types)) {
			type = spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type];
		}
	}

	strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * The latest spec (DDR4 SPD Document Release 3) defines
		 * NVDIMM Hybrid only.
		 */
		if ((s->sm_ddr4.ddr4_hybrid)
		    && (s->sm_ddr4.ddr4_hybrid_media == 1))
			strlcat(sc->sc_type, " NVDIMM hybrid",
			    SPDMEM_TYPE_MAXLEN);
	}

	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRING, "mem_type",
		    SYSCTL_DESCR("memory module type"), NULL,
		    0, sc->sc_type, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (IS_RAMBUS_TYPE) {
		aprint_naive("\n");
		aprint_normal("\n");
		aprint_normal_dev(self, "%s, SPD Revision %s", type, rambus_rev);
		dimm_size = 1 << (s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13);
		if (dimm_size >= 1024)
			aprint_normal(", %dGB\n", dimm_size / 1024);
		else
			aprint_normal(", %dMB\n", dimm_size);

		/* No further decode for RAMBUS memory */
		return;
	}
	switch (s->sm_type) {
	case SPDMEM_MEMTYPE_EDO:
	case SPDMEM_MEMTYPE_FPM:
		decode_edofpm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_ROM:
		decode_rom(node, self, s);
		break;
	case SPDMEM_MEMTYPE_SDRAM:
		decode_sdram(node, self, s, spd_len);
		break;
	case SPDMEM_MEMTYPE_DDRSDRAM:
		decode_ddr(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR2SDRAM:
		decode_ddr2(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR3SDRAM:
		decode_ddr3(node, self, s);
		break;
	case SPDMEM_MEMTYPE_FBDIMM:
	case SPDMEM_MEMTYPE_FBDIMM_PROBE:
		decode_fbdimm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR4SDRAM:
		decode_ddr4(node, self, s);
		break;
	}

	/* Dump SPD */
	for (i = 0; i < spd_len;  i += 16) {
		unsigned int j, k;
		aprint_debug_dev(self, "0x%02x:", i);
		k = (spd_len > (i + 16)) ? i + 16 : spd_len;
		for (j = i; j < k; j++)
			aprint_debug(" %02x", ((uint8_t *)s)[j]);
		aprint_debug("\n");
	}
}

int
spdmem_common_detach(struct spdmem_softc *sc, device_t self)
{
	sysctl_teardown(&sc->sc_sysctl_log);

	return 0;
}

static void
decode_size_speed(device_t self, const struct sysctlnode *node,
		  int dimm_size, int cycle_time, int d_clk, int bits,
		  bool round, const char *ddr_type_string, int speed)
{
	int p_clk;
	struct spdmem_softc *sc = device_private(self);

	if (dimm_size < 1024)
		aprint_normal("%dMB", dimm_size);
	else
		aprint_normal("%dGB", dimm_size / 1024);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "size",
		    SYSCTL_DESCR("module size in MB"), NULL,
		    dimm_size, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (cycle_time == 0) {
		aprint_normal("\n");
		return;
	}

	/*
	 * Calculate p_clk first, since for DDR3 we need maximum significance.
	 * DDR3 rating is not rounded to a multiple of 100.  This results in
	 * cycle_time of 1.5ns displayed as PC3-10666.
	 *
	 * For SDRAM, the speed is provided by the caller so we use it.
	 */
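	/*
	 * Worked example (hypothetical DDR3-1333 module): cycle_time 1500
	 * (1.5ns in units of .001ns), d_clk 2 and a 64-bit bus give
	 * p_clk = 2000000 * 64 / 8 / 1500 = 10666 and d_clk = 1333, i.e.
	 * "1333MHz (PC3-10666)".
	 */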
	d_clk *= 1000 * 1000;
	if (speed)
		p_clk = speed;
	else
		p_clk = (d_clk * bits) / 8 / cycle_time;
	d_clk = ((d_clk + cycle_time / 2) ) / cycle_time;
	if (round) {
		if ((p_clk % 100) >= 50)
			p_clk += 50;
		p_clk -= p_clk % 100;
	}
	aprint_normal(", %dMHz (%s-%d)\n",
		      d_clk, ddr_type_string, p_clk);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
			       CTLFLAG_IMMEDIATE,
			       CTLTYPE_INT, "speed",
			       SYSCTL_DESCR("memory speed in MHz"),
			       NULL, d_clk, NULL, 0,
			       CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
}

static void
decode_voltage_refresh(device_t self, struct spdmem *s)
{
	const char *voltage, *refresh;

	if (s->sm_voltage < __arraycount(spdmem_voltage_types))
		voltage = spdmem_voltage_types[s->sm_voltage];
	else
		voltage = "unknown";

	if (s->sm_refresh < __arraycount(spdmem_refresh_types))
		refresh = spdmem_refresh_types[s->sm_refresh];
	else
		refresh = "unknown";

	aprint_verbose_dev(self, "voltage %s, refresh time %s%s\n",
			voltage, refresh,
			s->sm_selfrefresh?" (self-refreshing)":"");
}

static void
decode_edofpm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
	    s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols, s->sm_fpm.fpm_banks,
	    s->sm_fpm.fpm_tRAC, s->sm_fpm.fpm_tCAC);
}

static void
decode_rom(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self, "%d rows, %d cols, %d banks\n",
	    s->sm_rom.rom_rows, s->sm_rom.rom_cols, s->sm_rom.rom_banks);
}

static void
decode_sdram(const struct sysctlnode *node, device_t self, struct spdmem *s,
	     int spd_len)
{
	int dimm_size, cycle_time, bits, tAA, i, speed, freq;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
		(s->sm_sdr.sdr_mod_attrs & SPDMEM_SDR_MASK_REG)?
			" (registered)":"",
		(s->sm_config < __arraycount(spdmem_parity_types))?
			spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17);
	dimm_size *= s->sm_sdr.sdr_banks * s->sm_sdr.sdr_banks_per_chip;

	cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
		     s->sm_sdr.sdr_cycle_tenths * 100;
	bits = le16toh(s->sm_sdr.sdr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;

	/* Calculate speed here - from OpenBSD */
	if (spd_len >= 128)
		freq = ((uint8_t *)s)[126];
	else
		freq = 0;
	switch (freq) {
		/*
		 * Must check cycle time since some PC-133 DIMMs
		 * actually report PC-100
		 */
	    case 100:
	    case 133:
		if (cycle_time < 8000)
			speed = 133;
		else
			speed = 100;
		break;
	    case 0x66:		/* Legacy DIMMs use _hex_ 66! */
	    default:
		speed = 66;
	}
	decode_size_speed(self, node, dimm_size, cycle_time, 1, bits, FALSE,
			  "PC", speed);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols, s->sm_sdr.sdr_banks,
	    s->sm_sdr.sdr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000) / 100);

	tAA  = 0;
	for (i = 0; i < 8; i++)
		if (s->sm_sdr.sdr_tCAS & (1 << i))
			tAA = i;
	tAA++;
	aprint_verbose_dev(self, LATENCY, tAA, s->sm_sdr.sdr_tRCD,
	    s->sm_sdr.sdr_tRP, s->sm_sdr.sdr_tRAS);

	decode_voltage_refresh(self, s);
}

static void
decode_ddr(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
		(s->sm_ddr.ddr_mod_attrs & SPDMEM_DDR_MASK_REG)?
			" (registered)":"",
		(s->sm_config < __arraycount(spdmem_parity_types))?
			spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17);
	dimm_size *= s->sm_ddr.ddr_ranks * s->sm_ddr.ddr_banks_per_chip;

	cycle_time = s->sm_ddr.ddr_cycle_whole * 1000 +
		  spdmem_cycle_frac[s->sm_ddr.ddr_cycle_tenths];
	bits = le16toh(s->sm_ddr.ddr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
			  "PC", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols, s->sm_ddr.ddr_ranks,
	    s->sm_ddr.ddr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000 + 50) / 100);

	tAA  = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr.ddr_tCAS & (1 << i))
			tAA = i;
	tAA /= 2;

#define __DDR_ROUND(scale, field)	\
		((scale * s->sm_ddr.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR_ROUND(250, ddr_tRCD),
		__DDR_ROUND(250, ddr_tRP), __DDR_ROUND(1000, ddr_tRAS));

#undef	__DDR_ROUND

	decode_voltage_refresh(self, s);
}

static void
decode_ddr2(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
		(s->sm_ddr2.ddr2_mod_attrs & SPDMEM_DDR2_MASK_REG)?
			" (registered)":"",
		(s->sm_config < __arraycount(spdmem_parity_types))?
			spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17);
	dimm_size *= (s->sm_ddr2.ddr2_ranks + 1) *
		     s->sm_ddr2.ddr2_banks_per_chip;

	cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 +
		 spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac];
	bits = s->sm_ddr2.ddr2_datawidth;
	if ((s->sm_config & 0x03) != 0)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
			  "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%02dns cycle time\n",
	    s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols,
	    s->sm_ddr2.ddr2_ranks + 1, s->sm_ddr2.ddr2_banks_per_chip,
	    cycle_time / 1000, (cycle_time % 1000 + 5) /10 );

	tAA  = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr2.ddr2_tCAS & (1 << i))
			tAA = i;

#define __DDR2_ROUND(scale, field)	\
		((scale * s->sm_ddr2.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR2_ROUND(250, ddr2_tRCD),
		__DDR2_ROUND(250, ddr2_tRP), __DDR2_ROUND(1000, ddr2_tRAS));

#undef	__DDR2_ROUND

	decode_voltage_refresh(self, s);
}

static void
decode_ddr3(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal(": %18s\n", s->sm_ddr3.ddr3_part);
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	if (s->sm_ddr3.ddr3_mod_type ==
		SPDMEM_DDR3_TYPE_MINI_RDIMM ||
	    s->sm_ddr3.ddr3_mod_type == SPDMEM_DDR3_TYPE_RDIMM)
		aprint_normal(" (registered)");
	aprint_normal(", %sECC, %stemp-sensor, ",
		(s->sm_ddr3.ddr3_hasECC)?"":"no ",
		(s->sm_ddr3.ddr3_has_therm_sensor)?"":"no ");

	/*
	 * DDR3 size specification is quite different from others
	 *
	 * Module capacity is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	external_bus_width / internal_bus_width
	 * We further divide by 2**20 to get our answer in MB
	 */
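	/*
	 * Worked example (hypothetical module): 4Gbit chips (chipsize code
	 * 4), a 64-bit bus (datawidth code 3), x16 chips (chipwidth code 2)
	 * and a single physical bank give (4+28-20) - 3 + (3+3) - (2+2) = 11,
	 * i.e. 1 << 11 = 2048MB.
	 */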
	dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
		    (s->sm_ddr3.ddr3_datawidth + 3) -
		    (s->sm_ddr3.ddr3_chipwidth + 2);
	dimm_size = (1 << dimm_size) * (s->sm_ddr3.ddr3_physbanks + 1);

	cycle_time = (1000 * s->sm_ddr3.ddr3_mtb_dividend +
			    (s->sm_ddr3.ddr3_mtb_divisor / 2)) /
		     s->sm_ddr3.ddr3_mtb_divisor;
	cycle_time *= s->sm_ddr3.ddr3_tCKmin;
	bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, FALSE,
			  "PC3", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d log. banks, %d phys. banks, "
	    "%d.%03dns cycle time\n",
	    s->sm_ddr3.ddr3_rows + 9, s->sm_ddr3.ddr3_cols + 12,
	    1 << (s->sm_ddr3.ddr3_logbanks + 3),
	    s->sm_ddr3.ddr3_physbanks + 1,
	    cycle_time/1000, cycle_time % 1000);

#define	__DDR3_CYCLES(field) (s->sm_ddr3.field / s->sm_ddr3.ddr3_tCKmin)

	aprint_verbose_dev(self, LATENCY, __DDR3_CYCLES(ddr3_tAAmin),
		__DDR3_CYCLES(ddr3_tRCDmin), __DDR3_CYCLES(ddr3_tRPmin),
		(s->sm_ddr3.ddr3_tRAS_msb * 256 + s->sm_ddr3.ddr3_tRAS_lsb) /
		    s->sm_ddr3.ddr3_tCKmin);

#undef	__DDR3_CYCLES

	/* For DDR3, Voltage is written in another area */
	if (!s->sm_ddr3.ddr3_NOT15V || s->sm_ddr3.ddr3_135V
	    || s->sm_ddr3.ddr3_125V) {
		aprint_verbose("%s:", device_xname(self));
		if (!s->sm_ddr3.ddr3_NOT15V)
			aprint_verbose(" 1.5V");
		if (s->sm_ddr3.ddr3_135V)
			aprint_verbose(" 1.35V");
		if (s->sm_ddr3.ddr3_125V)
			aprint_verbose(" 1.25V");
		aprint_verbose(" operable\n");
	}
}

static void
decode_fbdimm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	/*
	 * FB-DIMM module size calculation is very much like DDR3
	 */
	dimm_size = s->sm_fbd.fbdimm_rows + 12 +
		    s->sm_fbd.fbdimm_cols +  9 - 20 - 3;
	dimm_size = (1 << dimm_size) * (1 << (s->sm_fbd.fbdimm_banks + 2));

	cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
			    (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
		     s->sm_fbd.fbdimm_mtb_divisor;
	bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
			  "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d.%02dns cycle time\n",
	    s->sm_fbd.fbdimm_rows, s->sm_fbd.fbdimm_cols,
	    1 << (s->sm_fbd.fbdimm_banks + 2),
	    cycle_time / 1000, (cycle_time % 1000 + 5) /10 );

#define	__FBDIMM_CYCLES(field) (s->sm_fbd.field / s->sm_fbd.fbdimm_tCKmin)

	aprint_verbose_dev(self, LATENCY, __FBDIMM_CYCLES(fbdimm_tAAmin),
		__FBDIMM_CYCLES(fbdimm_tRCDmin), __FBDIMM_CYCLES(fbdimm_tRPmin),
		(s->sm_fbd.fbdimm_tRAS_msb * 256 + s->sm_fbd.fbdimm_tRAS_lsb) /
		    s->sm_fbd.fbdimm_tCKmin);

#undef	__FBDIMM_CYCLES

	decode_voltage_refresh(self, s);
}

static void
decode_ddr4(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time;
	int tAA_clocks, tRCD_clocks,tRP_clocks, tRAS_clocks;

	aprint_naive("\n");
	aprint_normal(": %20s\n", s->sm_ddr4.ddr4_part_number);
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
	if (s->sm_ddr4.ddr4_mod_type < __arraycount(spdmem_ddr4_module_types))
		aprint_normal(" (%s)",
		    spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type]);
	aprint_normal(", %stemp-sensor, ",
		(s->sm_ddr4.ddr4_has_therm_sensor)?"":"no ");

	/*
	 * DDR4 size calculation from JEDEC spec
	 *
	 * Module capacity in bytes is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	primary_bus_width / DRAM_width *
	 *	logical_ranks_per_DIMM
	 *
	 * logical_ranks_per DIMM equals package_ranks, but multiply
	 * by diecount for 3DS packages
	 *
	 * We further divide by 2**20 to get our answer in MB
	 */
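	/*
	 * Worked example (hypothetical module): 4Gbit chips (capacity code
	 * 4), a 64-bit primary bus (code 3), x8 devices (width code 1) and
	 * one package rank give (4+28) - 20 - 3 + (3+3) - 3 = 12, i.e.
	 * 1 << 12 = 4096MB.
	 */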
	dimm_size = (s->sm_ddr4.ddr4_capacity + 28)	/* chip_capacity */
		     - 20				/* convert to MB */
		     - 3				/* bits --> bytes */
		     + (s->sm_ddr4.ddr4_primary_bus_width + 3); /* bus width */
	switch (s->sm_ddr4.ddr4_device_width) {		/* DRAM width */
	case 0:	dimm_size -= 2;
		break;
	case 1: dimm_size -= 3;
		break;
	case 2:	dimm_size -= 4;
		break;
	case 4: dimm_size -= 5;
		break;
	default:
		dimm_size = -1;		/* flag invalid value */
	}
	if (dimm_size >= 0) {
		dimm_size = (1 << dimm_size) *
		    (s->sm_ddr4.ddr4_package_ranks + 1); /* log.ranks/DIMM */
		if (s->sm_ddr4.ddr4_signal_loading == 2) {
			dimm_size *= (s->sm_ddr4.ddr4_diecount + 1);
		}
	}

#define	__DDR4_VALUE(field) ((s->sm_ddr4.ddr4_##field##_mtb * 125 +	\
			     s->sm_ddr4.ddr4_##field##_ftb) - 		\
			    ((s->sm_ddr4.ddr4_##field##_ftb > 127)?256:0))
	/*
	 * For now, the only value for mtb is 1 = 125ps, and ftb = 1ps
	 * so we don't need to figure out the time-base units - just
	 * hard-code them for now.
	 */
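	/*
	 * Example (hypothetical DDR4-2133 part): a tCKAVGmin of 0.938ns is
	 * encoded as mtb = 8 and ftb = -62 (stored as 194), so
	 * __DDR4_VALUE(tCKAVGmin) = 8 * 125 + 194 - 256 = 938.
	 */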
	cycle_time = __DDR4_VALUE(tCKAVGmin);
	decode_size_speed(self, node, dimm_size, cycle_time, 2,
			  1 << (s->sm_ddr4.ddr4_primary_bus_width + 3),
			  TRUE, "PC4", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d bank groups, "
	    "%d.%03dns cycle time\n",
	    s->sm_ddr4.ddr4_rows + 9, s->sm_ddr4.ddr4_cols + 12,
	    1 << (2 + s->sm_ddr4.ddr4_logbanks),
	    1 << s->sm_ddr4.ddr4_bankgroups,
	    cycle_time / 1000, cycle_time % 1000);

/*
 * Note that the ddr4_xxx_ftb fields are actually signed offsets from
 * the corresponding mtb value, so we might have to subtract 256!
 */

	tAA_clocks =  __DDR4_VALUE(tAAmin)  * 1000 / cycle_time;
	tRCD_clocks = __DDR4_VALUE(tRCDmin) * 1000 / cycle_time;
	tRP_clocks =  __DDR4_VALUE(tRPmin)  * 1000 / cycle_time;
	tRAS_clocks = (s->sm_ddr4.ddr4_tRASmin_msb * 256 +
		       s->sm_ddr4.ddr4_tRASmin_lsb) * 125 * 1000 / cycle_time;

/*
 * Per JEDEC spec, rounding is done by taking the time value, dividing
 * by the cycle time, subtracting .010 from the result, and then
 * rounding up to the nearest integer.  Unfortunately, none of their
 * examples say what to do when the result of the subtraction is already
 * an integer.  For now, assume that we still round up (so an interval
 * of exactly 12.010 clock cycles will be printed as 13).
 */
#define	__DDR4_ROUND(value) ((value - 10) / 1000 + 1)
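
/*
 * Example (hypothetical DDR4-1600 CL11 part): a tAAmin of 13750ps and a
 * cycle_time of 1250ps give tAA_clocks = 13750 * 1000 / 1250 = 11000,
 * and __DDR4_ROUND(11000) = (11000 - 10) / 1000 + 1 = 11 clocks.
 */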

	aprint_verbose_dev(self, LATENCY, __DDR4_ROUND(tAA_clocks),
			   __DDR4_ROUND(tRCD_clocks),
			   __DDR4_ROUND(tRP_clocks),
			   __DDR4_ROUND(tRAS_clocks));

#undef	__DDR4_VALUE
#undef	__DDR4_ROUND
}
959