1 /* $NetBSD: spdmem.c,v 1.31 2019/04/07 01:39:12 pgoyette Exp $ */ 2 3 /* 4 * Copyright (c) 2007 Nicolas Joly 5 * Copyright (c) 2007 Paul Goyette 6 * Copyright (c) 2007 Tobias Nygren 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. The name of the author may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * Serial Presence Detect (SPD) memory identification
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spdmem.c,v 1.31 2019/04/07 01:39:12 pgoyette Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <machine/bswap.h>

#include <dev/i2c/i2cvar.h>
#include <dev/ic/spdmemreg.h>
#include <dev/ic/spdmemvar.h>

/* Routines for decoding spd data */
static void decode_edofpm(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_rom(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_sdram(const struct sysctlnode *, device_t, struct spdmem *,
	int);
static void decode_ddr(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr2(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr3(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr4(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_fbdimm(const struct sysctlnode *, device_t, struct spdmem *);

static void decode_size_speed(device_t, const struct sysctlnode *,
	int, int, int, int, bool, const char *, int);
static void decode_voltage_refresh(device_t, struct spdmem *);

/*
 * Rambus modules are recognized by byte 0 holding a small SPD revision
 * value (< 4) instead of a length.  NOTE: expands 's', the local
 * "struct spdmem *" at each use site.
 */
#define IS_RAMBUS_TYPE (s->sm_len < 4)

/* Memory-type names, indexed by the SPD memory-type byte (byte 2) */
static const char* const spdmem_basic_types[] = {
	"unknown",
	"FPM",
	"EDO",
	"Pipelined Nibble",
	"SDRAM",
	"ROM",
	"DDR SGRAM",
	"DDR SDRAM",
	"DDR2 SDRAM",
	"DDR2 SDRAM FB",
	"DDR2 SDRAM FB Probe",
	"DDR3 SDRAM",
	"DDR4 SDRAM",
	"unknown",
	"DDR4E SDRAM",
	"LPDDR3 SDRAM",
	"LPDDR4 SDRAM"
};

/* DDR4 module-type names, indexed by the SPD module-type field */
static const char* const spdmem_ddr4_module_types[] = {
	"DDR4 Extended",
	"DDR4 RDIMM",
	"DDR4 UDIMM",
	"DDR4 SO-DIMM",
	"DDR4 Load-Reduced DIMM",
	"DDR4 Mini-RDIMM",
	"DDR4 Mini-UDIMM",
	"DDR4 Reserved",
	"DDR4 72Bit SO-RDIMM",
	"DDR4 72Bit SO-UDIMM",
	"DDR4 Undefined",
	"DDR4 Reserved",
	"DDR4 16Bit SO-DIMM",
	"DDR4 32Bit SO-DIMM",
	"DDR4 Reserved",
	"DDR4 Undefined"
};

/* "Superset" type names for modules that declare an enhanced variant */
static const char* const spdmem_superset_types[] = {
	"unknown",
	"ESDRAM",
	"DDR ESDRAM",
	"PEM EDO",
	"PEM SDRAM"
};

/* Interface voltage names, indexed by the SPD voltage field */
static const char* const spdmem_voltage_types[] = {
	"TTL (5V tolerant)",
	"LvTTL (not 5V tolerant)",
	"HSTL 1.5V",
	"SSTL 3.3V",
	"SSTL 2.5V",
	"SSTL 1.8V"
};

/* Refresh-interval names, indexed by the SPD refresh field */
static const char* const spdmem_refresh_types[] = {
	"15.625us",
	"3.9us",
	"7.8us",
	"31.3us",
	"62.5us",
	"125us"
};

/* Error-detection configuration names, indexed by sm_config */
static const char* const spdmem_parity_types[] = {
	"no parity or ECC",
	"data parity",
	"data ECC",
	"data parity and ECC",
	"cmd/addr parity",
	"cmd/addr/data parity",
	"cmd/addr parity, data ECC",
	"cmd/addr/data parity, data ECC"
};

/* DDR4 SPD EEPROM sizes in bytes, indexed by the encoded size field */
int spd_rom_sizes[] = { 0, 128, 256, 384, 512 };


/* Cycle time fractional values (units of .001 ns) for DDR2 SDRAM */
static const uint16_t spdmem_cycle_frac[] = {
	0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
	250, 333, 667, 750, 999, 999
};

/* Format string for timing info */
#define LATENCY	"tAA-tRCD-tRP-tRAS: %d-%d-%d-%d\n"

/* CRC functions used for certain memory types */

/*
 * Compute the CRC-16 (polynomial 0x1021, initial value 0) over SPD
 * bytes 0 .. count INCLUSIVE, fetching each byte via sc->sc_read.
 */
static uint16_t
spdcrc16(struct spdmem_softc *sc, int count)
{
	uint16_t crc;
	int i, j;
	uint8_t val;
	crc = 0;
	for (j = 0; j <= count; j++) {
		(sc->sc_read)(sc, j, &val);
		crc = crc ^ val << 8;
		for (i = 0; i < 8; ++i)
			if (crc & 0x8000)
				crc = crc << 1 ^ 0x1021;
			else
				crc = crc << 1;
	}
	return (crc & 0xFFFF);
}

/*
 * Probe for a valid SPD EEPROM: read the memory-type byte, then verify
 * the type-specific integrity check (checksum or CRC-16).
 * Returns non-zero if a supported, self-consistent SPD is present.
 */
int
spdmem_common_probe(struct spdmem_softc *sc)
{
	int cksum = 0;
	uint8_t i, val, spd_type;
	int spd_len, spd_crc_cover;
	uint16_t crc_calc, crc_spd;

	/* Read failed means a device doesn't exist */
	if ((sc->sc_read)(sc, 2, &spd_type) != 0)
		return 0;

	/* Memory type should not be 0 */
	if (spd_type == 0x00)
		return 0;

	/* For older memory types, validate the checksum over 1st 63 bytes */
	if (spd_type <= SPDMEM_MEMTYPE_DDR2SDRAM) {
		for (i = 0; i < 63; i++) {
			(sc->sc_read)(sc, i, &val);
			cksum += val;
		}

		/* Byte 63 holds the stored checksum (low 8 bits of sum) */
		(sc->sc_read)(sc, 63, &val);

		if ((cksum & 0xff) != val) {
			aprint_debug("spd checksum failed, calc = 0x%02x, "
			    "spd = 0x%02x\n", cksum, val);
			return 0;
		} else
			return 1;
	}

	/* For DDR3 and FBDIMM, verify the CRC */
	else if (spd_type <= SPDMEM_MEMTYPE_DDR3SDRAM) {
		(sc->sc_read)(sc, 0, &val);
		spd_len = val;
		/* Byte 0 flags whether the CRC covers bytes 0-116 or 0-125 */
		if (spd_len & SPDMEM_SPDCRC_116)
			spd_crc_cover = 116;
		else
			spd_crc_cover = 125;
		switch (spd_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			return 0;
		}
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		/* Stored CRC: byte 126 is the low byte, 127 the high byte */
		(sc->sc_read)(sc, 127, &val);
		crc_spd = val << 8;
		(sc->sc_read)(sc, 126, &val);
		crc_spd |= val;
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
			    "calc = 0x%04x, spd = 0x%04x\n",
			    spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		return 1;
	} else if (spd_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		(sc->sc_read)(sc, 0, &val);
		/* Low nibble of byte 0 encodes the used SPD size */
		spd_len = val & 0x0f;
		if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
			return 0;
		spd_len = spd_rom_sizes[spd_len];
		spd_crc_cover = 125;	/* For byte 0 to 125 */
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		(sc->sc_read)(sc, 127, &val);
		crc_spd = val << 8;
		(sc->sc_read)(sc, 126, &val);
		crc_spd |= val;
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
			    "calc = 0x%04x, spd = 0x%04x\n",
			    spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		/*
		 * We probably could also verify the CRC for the other
		 * "pages" of SPD data in blocks 1 and 2, but we'll do
		 * it some other time.
		 */
		return 1;
	}

	/* For unrecognized memory types, don't match at all */
	return 0;
}

/*
 * Common attach: read the SPD contents into sc->sc_spd_data, create
 * the hw.spdmemN sysctl subtree, then decode and print the module's
 * key parameters.
 */
void
spdmem_common_attach(struct spdmem_softc *sc, device_t self)
{
	struct spdmem *s = &(sc->sc_spd_data);
	const char *type;
	const char *rambus_rev = "Reserved";
	int dimm_size;
	unsigned int i, spd_len, spd_size;
	const struct sysctlnode *node = NULL;

	(sc->sc_read)(sc, 0, &s->sm_len);
	(sc->sc_read)(sc, 1, &s->sm_size);
	(sc->sc_read)(sc, 2, &s->sm_type);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * An even newer encoding with one byte holding both
		 * the used-size and capacity values
		 */
		spd_len = s->sm_len & 0x0f;
		spd_size = (s->sm_len >> 4) & 0x07;

		spd_len = spd_rom_sizes[spd_len];
		spd_size *= 512;

	} else if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
		/*
		 * FBDIMM and DDR3 (and probably all newer) have a different
		 * encoding of the SPD EEPROM used/total sizes
		 */
		spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
		switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			spd_len = 64;
			break;
		}
	} else {
		/* Older types: byte 1 is log2 of EEPROM size, byte 0 used */
		spd_size = 1 << s->sm_size;
		spd_len = s->sm_len;
		if (spd_len < 64)
			spd_len = 64;
	}
	/* Clamp to both the EEPROM size and our in-core buffer */
	if (spd_len > spd_size)
		spd_len = spd_size;
	if (spd_len > sizeof(struct spdmem))
		spd_len = sizeof(struct spdmem);
	/* Bytes 0-2 were read above; fetch the remainder */
	for (i = 3; i < spd_len; i++)
		(sc->sc_read)(sc, i, &((uint8_t *)s)[i]);

	/*
	 * Setup our sysctl subtree, hw.spdmemN
	 */
	sc->sc_sysctl_log = NULL;
	sysctl_createv(&sc->sc_sysctl_log, 0, NULL, &node,
	    0, CTLTYPE_NODE,
	    device_xname(self), NULL, NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL);
	/* Export the raw SPD bytes under hw.spdmemN.spd_data */
	if (node != NULL && spd_len != 0)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRUCT, "spd_data",
		    SYSCTL_DESCR("raw spd data"), NULL,
		    0, s, spd_len,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	/*
	 * Decode and print key SPD contents
	 */
	if (IS_RAMBUS_TYPE) {
		if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
			type = "Rambus";
		else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
			type = "Direct Rambus";
		else
			type = "Rambus (unknown)";

		/* For Rambus modules byte 0 holds the SPD revision */
		switch (s->sm_len) {
		case 0:
			rambus_rev = "Invalid";
			break;
		case 1:
			rambus_rev = "0.7";
			break;
		case 2:
			rambus_rev = "1.0";
			break;
		default:
			rambus_rev = "Reserved";
			break;
		}
	} else {
		if (s->sm_type < __arraycount(spdmem_basic_types))
			type = spdmem_basic_types[s->sm_type];
		else
			type = "unknown memory type";

		/* Some types carry a "superset" field naming a variant */
		if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
		    s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
		    s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
			type =
			    spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
			type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
		}
		/* For DDR4, use the more specific module-type name */
		if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM &&
		    s->sm_ddr4.ddr4_mod_type <
		    __arraycount(spdmem_ddr4_module_types)) {
			type = spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type];
		}
	}

	strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);

	if
	    (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * The latest spec (DDR4 SPD Document Release 3) defines
		 * NVDIMM Hybrid only.
		 */
		if ((s->sm_ddr4.ddr4_hybrid)
		    && (s->sm_ddr4.ddr4_hybrid_media == 1))
			strlcat(sc->sc_type, " NVDIMM hybrid",
			    SPDMEM_TYPE_MAXLEN);
	}

	/* Export the decoded type string as hw.spdmemN.mem_type */
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRING, "mem_type",
		    SYSCTL_DESCR("memory module type"), NULL,
		    0, sc->sc_type, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (IS_RAMBUS_TYPE) {
		aprint_naive("\n");
		aprint_normal("\n");
		aprint_normal_dev(self, "%s, SPD Revision %s", type, rambus_rev);
		/* Size in MB derived from the row+column address widths */
		dimm_size = 1 << (s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13);
		if (dimm_size >= 1024)
			aprint_normal(", %dGB\n", dimm_size / 1024);
		else
			aprint_normal(", %dMB\n", dimm_size);

		/* No further decode for RAMBUS memory */
		return;
	}
	/* Dispatch to the per-technology decoder */
	switch (s->sm_type) {
	case SPDMEM_MEMTYPE_EDO:
	case SPDMEM_MEMTYPE_FPM:
		decode_edofpm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_ROM:
		decode_rom(node, self, s);
		break;
	case SPDMEM_MEMTYPE_SDRAM:
		decode_sdram(node, self, s, spd_len);
		break;
	case SPDMEM_MEMTYPE_DDRSDRAM:
		decode_ddr(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR2SDRAM:
		decode_ddr2(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR3SDRAM:
		decode_ddr3(node, self, s);
		break;
	case SPDMEM_MEMTYPE_FBDIMM:
	case SPDMEM_MEMTYPE_FBDIMM_PROBE:
		decode_fbdimm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR4SDRAM:
		decode_ddr4(node, self, s);
		break;
	}

	/* Dump SPD */
	for (i = 0; i < spd_len; i += 16) {
		unsigned int j, k;
		aprint_debug_dev(self, "0x%02x:", i);
		k = (spd_len > (i + 16)) ?
		    i + 16 : spd_len;
		for (j = i; j < k; j++)
			aprint_debug(" %02x", ((uint8_t *)s)[j]);
		aprint_debug("\n");
	}
}

/* Common detach: tear down the sysctl subtree created at attach time */
int
spdmem_common_detach(struct spdmem_softc *sc, device_t self)
{
	sysctl_teardown(&sc->sc_sysctl_log);

	return 0;
}

/*
 * Print module size and rated speed, and export both via sysctl.
 *
 * dimm_size is in MB; cycle_time is in units of .001 ns; d_clk is the
 * clock multiplier (callers pass 1 for SDR, 2 for DDR-style types);
 * bits is the data width.  If "round" is set the PCx rating is rounded
 * to a multiple of 100; a non-zero "speed" overrides the computed
 * rating (used by the SDRAM decoder).
 */
static void
decode_size_speed(device_t self, const struct sysctlnode *node,
		  int dimm_size, int cycle_time, int d_clk, int bits,
		  bool round, const char *ddr_type_string, int speed)
{
	int p_clk;
	struct spdmem_softc *sc = device_private(self);

	if (dimm_size < 1024)
		aprint_normal("%dMB", dimm_size);
	else
		aprint_normal("%dGB", dimm_size / 1024);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "size",
		    SYSCTL_DESCR("module size in MB"), NULL,
		    dimm_size, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	/* A zero cycle time means no speed information to print */
	if (cycle_time == 0) {
		aprint_normal("\n");
		return;
	}

	/*
	 * Calculate p_clk first, since for DDR3 we need maximum significance.
	 * DDR3 rating is not rounded to a multiple of 100.  This results in
	 * cycle_time of 1.5ns displayed as PC3-10666.
	 *
	 * For SDRAM, the speed is provided by the caller so we use it.
	 */
	d_clk *= 1000 * 1000;
	if (speed)
		p_clk = speed;
	else
		p_clk = (d_clk * bits) / 8 / cycle_time;
	/* Clock in MHz, rounded to nearest */
	d_clk = ((d_clk + cycle_time / 2) ) / cycle_time;
	if (round) {
		if ((p_clk % 100) >= 50)
			p_clk += 50;
		p_clk -= p_clk % 100;
	}
	aprint_normal(", %dMHz (%s-%d)\n",
	    d_clk, ddr_type_string, p_clk);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "speed",
		    SYSCTL_DESCR("memory speed in MHz"),
		    NULL, d_clk, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
}

/* Print supply voltage and refresh interval (pre-DDR3 module types) */
static void
decode_voltage_refresh(device_t self, struct spdmem *s)
{
	const char *voltage, *refresh;

	if (s->sm_voltage < __arraycount(spdmem_voltage_types))
		voltage = spdmem_voltage_types[s->sm_voltage];
	else
		voltage = "unknown";

	if (s->sm_refresh < __arraycount(spdmem_refresh_types))
		refresh = spdmem_refresh_types[s->sm_refresh];
	else
		refresh = "unknown";

	aprint_verbose_dev(self, "voltage %s, refresh time %s%s\n",
	    voltage, refresh,
	    s->sm_selfrefresh?" (self-refreshing)":"");
}

/* Decode EDO and Fast Page Mode modules */
static void
decode_edofpm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
	    s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols, s->sm_fpm.fpm_banks,
	    s->sm_fpm.fpm_tRAC, s->sm_fpm.fpm_tCAC);
}

/* Decode ROM modules */
static void
decode_rom(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self, "%d rows, %d cols, %d banks\n",
	    s->sm_rom.rom_rows, s->sm_rom.rom_cols, s->sm_rom.rom_banks);
}

/* Decode SDR SDRAM modules; spd_len gates access to the frequency byte */
static void
decode_sdram(const struct sysctlnode *node, device_t self, struct spdmem *s,
	     int spd_len)
{
	int dimm_size, cycle_time, bits, tAA, i, speed, freq;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_sdr.sdr_mod_attrs & SPDMEM_SDR_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	/* Size in MB: 2^(rows+cols) scaled by banks and banks-per-chip */
	dimm_size = 1 << (s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17);
	dimm_size *= s->sm_sdr.sdr_banks * s->sm_sdr.sdr_banks_per_chip;

	cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
	    s->sm_sdr.sdr_cycle_tenths * 100;
	bits = le16toh(s->sm_sdr.sdr_datawidth);
	/* Exclude the parity/ECC bits from the data width */
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;

	/* Calculate speed here - from OpenBSD */
	if (spd_len >= 128)
		freq = ((uint8_t *)s)[126];
	else
		freq = 0;
	switch (freq) {
		/*
		 * Must check cycle time since some PC-133 DIMMs
		 * actually report PC-100
		 */
	case 100:
	case 133:
		if (cycle_time < 8000)
			speed = 133;
		else
			speed = 100;
		break;
	case 0x66:		/* Legacy DIMMs use _hex_ 66! */
	default:
		speed = 66;
	}
	decode_size_speed(self, node, dimm_size, cycle_time, 1, bits, FALSE,
	    "PC", speed);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols, s->sm_sdr.sdr_banks,
	    s->sm_sdr.sdr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000) / 100);

	/* sdr_tCAS is a bitmask of supported latencies; take the highest */
	tAA = 0;
	for (i = 0; i < 8; i++)
		if (s->sm_sdr.sdr_tCAS & (1 << i))
			tAA = i;
	tAA++;
	aprint_verbose_dev(self, LATENCY, tAA, s->sm_sdr.sdr_tRCD,
	    s->sm_sdr.sdr_tRP, s->sm_sdr.sdr_tRAS);

	decode_voltage_refresh(self, s);
}

/* Decode DDR (first-generation) SDRAM modules */
static void
decode_ddr(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_ddr.ddr_mod_attrs & SPDMEM_DDR_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	/* Size in MB: 2^(rows+cols) scaled by ranks and banks-per-chip */
	dimm_size = 1 << (s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17);
	dimm_size *= s->sm_ddr.ddr_ranks * s->sm_ddr.ddr_banks_per_chip;

	cycle_time = s->sm_ddr.ddr_cycle_whole * 1000 +
	    spdmem_cycle_frac[s->sm_ddr.ddr_cycle_tenths];
	bits = le16toh(s->sm_ddr.ddr_datawidth);
	/* Exclude the parity/ECC bits from the data width */
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols, s->sm_ddr.ddr_ranks,
	    s->sm_ddr.ddr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000 + 50) / 100);

	/*
	 * ddr_tCAS is a bitmask of supported latencies; take the highest
	 * set bit and halve it (bit positions are in half-cycle steps).
	 */
	tAA = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr.ddr_tCAS & (1 << i))
			tAA = i;
	tAA /= 2;

/* Convert a timing field (scaled to .001 ns) to clock cycles, rounded up */
#define	__DDR_ROUND(scale, field)	\
	((scale * s->sm_ddr.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR_ROUND(250, ddr_tRCD),
	    __DDR_ROUND(250, ddr_tRP), __DDR_ROUND(1000, ddr_tRAS));

#undef __DDR_ROUND

	decode_voltage_refresh(self, s);
}

/* Decode DDR2 SDRAM modules */
static void
decode_ddr2(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_ddr2.ddr2_mod_attrs & SPDMEM_DDR2_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
714 spdmem_parity_types[s->sm_config]:"invalid parity"); 715 716 dimm_size = 1 << (s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17); 717 dimm_size *= (s->sm_ddr2.ddr2_ranks + 1) * 718 s->sm_ddr2.ddr2_banks_per_chip; 719 720 cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 + 721 spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac]; 722 bits = s->sm_ddr2.ddr2_datawidth; 723 if ((s->sm_config & 0x03) != 0) 724 bits -= 8; 725 decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE, 726 "PC2", 0); 727 728 aprint_verbose_dev(self, 729 "%d rows, %d cols, %d ranks, %d banks/chip, %d.%02dns cycle time\n", 730 s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols, 731 s->sm_ddr2.ddr2_ranks + 1, s->sm_ddr2.ddr2_banks_per_chip, 732 cycle_time / 1000, (cycle_time % 1000 + 5) /10 ); 733 734 tAA = 0; 735 for (i = 2; i < 8; i++) 736 if (s->sm_ddr2.ddr2_tCAS & (1 << i)) 737 tAA = i; 738 739 #define __DDR2_ROUND(scale, field) \ 740 ((scale * s->sm_ddr2.field + cycle_time - 1) / cycle_time) 741 742 aprint_verbose_dev(self, LATENCY, tAA, __DDR2_ROUND(250, ddr2_tRCD), 743 __DDR2_ROUND(250, ddr2_tRP), __DDR2_ROUND(1000, ddr2_tRAS)); 744 745 #undef __DDR_ROUND 746 747 decode_voltage_refresh(self, s); 748 } 749 750 static void 751 print_part(const char *part, size_t pnsize) 752 { 753 const char *p = memchr(part, ' ', pnsize); 754 if (p == NULL) 755 p = part + pnsize; 756 aprint_normal(": %.*s\n", (int)(p - part), part); 757 } 758 759 static void 760 decode_ddr3(const struct sysctlnode *node, device_t self, struct spdmem *s) 761 { 762 int dimm_size, cycle_time, bits; 763 764 aprint_naive("\n"); 765 print_part(s->sm_ddr3.ddr3_part, sizeof(s->sm_ddr3.ddr3_part)); 766 aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]); 767 768 if (s->sm_ddr3.ddr3_mod_type == 769 SPDMEM_DDR3_TYPE_MINI_RDIMM || 770 s->sm_ddr3.ddr3_mod_type == SPDMEM_DDR3_TYPE_RDIMM) 771 aprint_normal(" (registered)"); 772 aprint_normal(", %sECC, %stemp-sensor, ", 773 (s->sm_ddr3.ddr3_hasECC)?"":"no ", 774 
	    (s->sm_ddr3.ddr3_has_therm_sensor)?"":"no ");

	/*
	 * DDR3 size specification is quite different from others
	 *
	 * Module capacity is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	external_bus_width / internal_bus_width
	 * We further divide by 2**20 to get our answer in MB
	 */
	dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
	    (s->sm_ddr3.ddr3_datawidth + 3) -
	    (s->sm_ddr3.ddr3_chipwidth + 2);
	dimm_size = (1 << dimm_size) * (s->sm_ddr3.ddr3_physbanks + 1);

	/* Cycle time in .001 ns: medium time base (rounded) times tCKmin */
	cycle_time = (1000 * s->sm_ddr3.ddr3_mtb_dividend +
	    (s->sm_ddr3.ddr3_mtb_divisor / 2)) /
	    s->sm_ddr3.ddr3_mtb_divisor;
	cycle_time *= s->sm_ddr3.ddr3_tCKmin;
	bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, FALSE,
	    "PC3", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d log. banks, %d phys. banks, "
	    "%d.%03dns cycle time\n",
	    s->sm_ddr3.ddr3_rows + 9, s->sm_ddr3.ddr3_cols + 12,
	    1 << (s->sm_ddr3.ddr3_logbanks + 3),
	    s->sm_ddr3.ddr3_physbanks + 1,
	    cycle_time/1000, cycle_time % 1000);

/* Convert a timing field (in mtb units) to clock cycles */
#define	__DDR3_CYCLES(field) (s->sm_ddr3.field / s->sm_ddr3.ddr3_tCKmin)

	aprint_verbose_dev(self, LATENCY, __DDR3_CYCLES(ddr3_tAAmin),
	    __DDR3_CYCLES(ddr3_tRCDmin), __DDR3_CYCLES(ddr3_tRPmin),
	    (s->sm_ddr3.ddr3_tRAS_msb * 256 + s->sm_ddr3.ddr3_tRAS_lsb) /
	    s->sm_ddr3.ddr3_tCKmin);

#undef __DDR3_CYCLES

	/* For DDR3, Voltage is written in another area */
	if (!s->sm_ddr3.ddr3_NOT15V || s->sm_ddr3.ddr3_135V
	    || s->sm_ddr3.ddr3_125V) {
		aprint_verbose("%s:", device_xname(self));
		if (!s->sm_ddr3.ddr3_NOT15V)
			aprint_verbose(" 1.5V");
		if (s->sm_ddr3.ddr3_135V)
			aprint_verbose(" 1.35V");
		if (s->sm_ddr3.ddr3_125V)
			aprint_verbose(" 1.25V");
		aprint_verbose(" operable\n");
	}
}

/* Decode FB-DIMM modules */
static void
decode_fbdimm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int
	    dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	/*
	 * FB-DIMM module size calculation is very much like DDR3
	 */
	dimm_size = s->sm_fbd.fbdimm_rows + 12 +
	    s->sm_fbd.fbdimm_cols + 9 - 20 - 3;
	dimm_size = (1 << dimm_size) * (1 << (s->sm_fbd.fbdimm_banks + 2));

	/* Cycle time in .001 ns from the medium time base, rounded */
	cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
	    (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
	    s->sm_fbd.fbdimm_mtb_divisor;
	bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d.%02dns cycle time\n",
	    s->sm_fbd.fbdimm_rows, s->sm_fbd.fbdimm_cols,
	    1 << (s->sm_fbd.fbdimm_banks + 2),
	    cycle_time / 1000, (cycle_time % 1000 + 5) /10 );

/* Convert a timing field (in mtb units) to clock cycles */
#define	__FBDIMM_CYCLES(field) (s->sm_fbd.field / s->sm_fbd.fbdimm_tCKmin)

	aprint_verbose_dev(self, LATENCY, __FBDIMM_CYCLES(fbdimm_tAAmin),
	    __FBDIMM_CYCLES(fbdimm_tRCDmin), __FBDIMM_CYCLES(fbdimm_tRPmin),
	    (s->sm_fbd.fbdimm_tRAS_msb * 256 + s->sm_fbd.fbdimm_tRAS_lsb) /
	    s->sm_fbd.fbdimm_tCKmin);

#undef __FBDIMM_CYCLES

	decode_voltage_refresh(self, s);
}

/* Decode DDR4 SDRAM modules */
static void
decode_ddr4(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, ranks;
	int tAA_clocks, tRCD_clocks, tRP_clocks, tRAS_clocks;

	aprint_naive("\n");
	print_part(s->sm_ddr4.ddr4_part_number,
	    sizeof(s->sm_ddr4.ddr4_part_number));
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
	if (s->sm_ddr4.ddr4_mod_type < __arraycount(spdmem_ddr4_module_types))
		aprint_normal(" (%s)",
		    spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type]);
	aprint_normal(", %sECC, %stemp-sensor, ",
	    (s->sm_ddr4.ddr4_bus_width_extension) ? "" : "no ",
	    (s->sm_ddr4.ddr4_has_therm_sensor) ?
"" : "no "); 885 886 /* 887 * DDR4 size calculation from JEDEC spec 888 * 889 * Module capacity in bytes is defined as 890 * Chip_Capacity_in_bits / 8bits-per-byte * 891 * primary_bus_width / DRAM_width * 892 * logical_ranks_per_DIMM 893 * 894 * logical_ranks_per DIMM equals package_ranks, but multiply 895 * by diecount for 3DS packages 896 * 897 * We further divide by 2**20 to get our answer in MB 898 */ 899 dimm_size = (s->sm_ddr4.ddr4_capacity + 28) /* chip_capacity */ 900 - 20 /* convert to MB */ 901 - 3 /* bits --> bytes */ 902 + (s->sm_ddr4.ddr4_primary_bus_width + 3); /* bus width */ 903 switch (s->sm_ddr4.ddr4_device_width) { /* DRAM width */ 904 case 0: dimm_size -= 2; 905 break; 906 case 1: dimm_size -= 3; 907 break; 908 case 2: dimm_size -= 4; 909 break; 910 case 4: dimm_size -= 5; 911 break; 912 default: 913 dimm_size = -1; /* flag invalid value */ 914 } 915 if (dimm_size >= 0) { 916 dimm_size = (1 << dimm_size) * 917 (s->sm_ddr4.ddr4_package_ranks + 1); /* log.ranks/DIMM */ 918 if (s->sm_ddr4.ddr4_signal_loading == 2) { 919 dimm_size *= (s->sm_ddr4.ddr4_diecount + 1); 920 } 921 } 922 923 /* 924 * Note that the ddr4_xxx_ftb fields are actually signed offsets from 925 * the corresponding mtb value, so we might have to subtract 256! 926 */ 927 #define __DDR4_VALUE(field) ((s->sm_ddr4.ddr4_##field##_mtb * 125 + \ 928 s->sm_ddr4.ddr4_##field##_ftb) - \ 929 ((s->sm_ddr4.ddr4_##field##_ftb > 127)?256:0)) 930 /* 931 * For now, the only value for mtb is 0 = 125ps, and ftb = 1ps 932 * so we don't need to figure out the time-base units - just 933 * hard-code them for now. 
934 */ 935 cycle_time = __DDR4_VALUE(tCKAVGmin); 936 decode_size_speed(self, node, dimm_size, cycle_time, 2, 937 1 << (s->sm_ddr4.ddr4_primary_bus_width + 3), 938 TRUE, "PC4", 0); 939 940 ranks = s->sm_ddr4.ddr4_package_ranks + 1; 941 aprint_verbose_dev(self, 942 "%d rows, %d cols, %d ranks%s, %d banks/group, %d bank groups\n", 943 s->sm_ddr4.ddr4_rows + 12, s->sm_ddr4.ddr4_cols + 9, 944 ranks, (ranks > 1) ? ((s->sm_ddr4.ddr4_rank_mix == 1) 945 ? " (asymmetric)" : " (symmetric)") : "", 946 1 << (2 + s->sm_ddr4.ddr4_logbanks), 947 1 << s->sm_ddr4.ddr4_bankgroups); 948 949 aprint_verbose_dev(self, "%d.%03dns cycle time\n", 950 cycle_time / 1000, cycle_time % 1000); 951 952 tAA_clocks = __DDR4_VALUE(tAAmin) * 1000 / cycle_time; 953 tRCD_clocks = __DDR4_VALUE(tRCDmin) * 1000 / cycle_time; 954 tRP_clocks = __DDR4_VALUE(tRPmin) * 1000 / cycle_time; 955 tRAS_clocks = (s->sm_ddr4.ddr4_tRASmin_msb * 256 + 956 s->sm_ddr4.ddr4_tRASmin_lsb) * 125 * 1000 / cycle_time; 957 958 /* 959 * Per JEDEC spec, rounding is done by taking the time value, dividing 960 * by the cycle time, subtracting .010 from the result, and then 961 * rounded up to the nearest integer. Unfortunately, none of their 962 * examples say what to do when the result of the subtraction is already 963 * an integer. For now, assume that we still round up (so an interval 964 * of exactly 12.010 clock cycles will be printed as 13). 965 */ 966 #define __DDR4_ROUND(value) ((value - 10) / 1000 + 1) 967 968 aprint_verbose_dev(self, LATENCY, __DDR4_ROUND(tAA_clocks), 969 __DDR4_ROUND(tRCD_clocks), 970 __DDR4_ROUND(tRP_clocks), 971 __DDR4_ROUND(tRAS_clocks)); 972 973 #undef __DDR4_VALUE 974 #undef __DDR4_ROUND 975 } 976