/*	$NetBSD: spdmem.c,v 1.35 2020/03/24 03:47:39 msaitoh Exp $	*/

/*
 * Copyright (c) 2007 Nicolas Joly
 * Copyright (c) 2007 Paul Goyette
 * Copyright (c) 2007 Tobias Nygren
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Serial Presence Detect (SPD) memory identification
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spdmem.c,v 1.35 2020/03/24 03:47:39 msaitoh Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <machine/bswap.h>

#include <dev/i2c/i2cvar.h>
#include <dev/ic/spdmemreg.h>
#include <dev/ic/spdmemvar.h>

/* Routines for decoding spd data */
static void decode_edofpm(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_rom(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_sdram(const struct sysctlnode *, device_t, struct spdmem *,
	int);
static void decode_ddr(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr2(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr3(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr4(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_fbdimm(const struct sysctlnode *, device_t, struct spdmem *);

static void decode_size_speed(device_t, const struct sysctlnode *,
	int, int, int, int, bool, const char *, int);
static void decode_voltage_refresh(device_t, struct spdmem *);

#define IS_RAMBUS_TYPE (s->sm_len < 4)

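/*
 * Memory type strings, indexed by SPD byte 2 (the JEDEC "Key Byte" /
 * DRAM device type); e.g. 0x0b is DDR3 SDRAM and 0x0c is DDR4 SDRAM.
 */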
static const char* const spdmem_basic_types[] = {
	"unknown",
	"FPM",
	"EDO",
	"Pipelined Nibble",
	"SDRAM",
	"ROM",
	"DDR SGRAM",
	"DDR SDRAM",
	"DDR2 SDRAM",
	"DDR2 SDRAM FB",
	"DDR2 SDRAM FB Probe",
	"DDR3 SDRAM",
	"DDR4 SDRAM",
	"unknown",
	"DDR4E SDRAM",
	"LPDDR3 SDRAM",
	"LPDDR4 SDRAM"
};

static const char* const spdmem_ddr4_module_types[] = {
	"DDR4 Extended",
	"DDR4 RDIMM",
	"DDR4 UDIMM",
	"DDR4 SO-DIMM",
	"DDR4 Load-Reduced DIMM",
	"DDR4 Mini-RDIMM",
	"DDR4 Mini-UDIMM",
	"DDR4 Reserved",
	"DDR4 72Bit SO-RDIMM",
	"DDR4 72Bit SO-UDIMM",
	"DDR4 Undefined",
	"DDR4 Reserved",
	"DDR4 16Bit SO-DIMM",
	"DDR4 32Bit SO-DIMM",
	"DDR4 Reserved",
	"DDR4 Undefined"
};

static const char* const spdmem_superset_types[] = {
	"unknown",
	"ESDRAM",
	"DDR ESDRAM",
	"PEM EDO",
	"PEM SDRAM"
};

static const char* const spdmem_voltage_types[] = {
	"TTL (5V tolerant)",
	"LvTTL (not 5V tolerant)",
	"HSTL 1.5V",
	"SSTL 3.3V",
	"SSTL 2.5V",
	"SSTL 1.8V"
};

static const char* const spdmem_refresh_types[] = {
	"15.625us",
	"3.9us",
	"7.8us",
	"31.3us",
	"62.5us",
	"125us"
};

static const char* const spdmem_parity_types[] = {
	"no parity or ECC",
	"data parity",
	"data ECC",
	"data parity and ECC",
	"cmd/addr parity",
	"cmd/addr/data parity",
	"cmd/addr parity, data ECC",
	"cmd/addr/data parity, data ECC"
};

int spd_rom_sizes[] = { 0, 128, 256, 384, 512 };


/* Cycle time fractional values (units of .001 ns) for DDR2 SDRAM */
static const uint16_t spdmem_cycle_frac[] = {
	0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
	250, 333, 667, 750, 999, 999
};

/* Format string for timing info */
#define LATENCY	"tAA-tRCD-tRP-tRAS: %d-%d-%d-%d\n"

/* CRC functions used for certain memory types */
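/*
 * This is the CRC-16 used by the JEDEC SPD definitions for FB-DIMM,
 * DDR3 and DDR4: polynomial 0x1021, initial value 0, MSB first,
 * computed over SPD bytes 0 through 'count' inclusive.
 */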

static uint16_t
spdcrc16(struct spdmem_softc *sc, int count)
{
	uint16_t crc;
	int i, j;
	uint8_t val;
	crc = 0;
	for (j = 0; j <= count; j++) {
		(sc->sc_read)(sc, j, &val);
		crc = crc ^ val << 8;
		for (i = 0; i < 8; ++i)
			if (crc & 0x8000)
				crc = crc << 1 ^ 0x1021;
			else
				crc = crc << 1;
	}
	return (crc & 0xFFFF);
}

int
spdmem_common_probe(struct spdmem_softc *sc)
{
	int cksum = 0;
	uint8_t i, val, spd_type;
	int spd_len, spd_crc_cover;
	uint16_t crc_calc, crc_spd;

	/* Read failed means a device doesn't exist */
	if ((sc->sc_read)(sc, 2, &spd_type) != 0)
		return 0;

	/* Memory type should not be 0 */
	if (spd_type == 0x00)
		return 0;

	/* For older memory types, validate the checksum over 1st 63 bytes */
	if (spd_type <= SPDMEM_MEMTYPE_DDR2SDRAM) {
		for (i = 0; i < 63; i++) {
			(sc->sc_read)(sc, i, &val);
			cksum += val;
		}

		(sc->sc_read)(sc, 63, &val);

		if ((cksum & 0xff) != val) {
			aprint_debug("spd checksum failed, calc = 0x%02x, "
			    "spd = 0x%02x\n", cksum, val);
			return 0;
		} else
			return 1;
	}

	/* For DDR3 and FBDIMM, verify the CRC */
	else if (spd_type <= SPDMEM_MEMTYPE_DDR3SDRAM) {
		(sc->sc_read)(sc, 0, &val);
		spd_len = val;
		if (spd_len & SPDMEM_SPDCRC_116)
			spd_crc_cover = 116;
		else
			spd_crc_cover = 125;
		switch (spd_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			return 0;
		}
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		(sc->sc_read)(sc, 127, &val);
		crc_spd = val << 8;
		(sc->sc_read)(sc, 126, &val);
		crc_spd |= val;
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
			    "calc = 0x%04x, spd = 0x%04x\n",
			    spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		return 1;
	} else if (spd_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		(sc->sc_read)(sc, 0, &val);
		spd_len = val & 0x0f;
		if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
			return 0;
		spd_len = spd_rom_sizes[spd_len];
		spd_crc_cover = 125;	/* For byte 0 to 125 */
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		(sc->sc_read)(sc, 127, &val);
		crc_spd = val << 8;
		(sc->sc_read)(sc, 126, &val);
		crc_spd |= val;
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
			    "calc = 0x%04x, spd = 0x%04x\n",
			    spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		/*
		 * We probably could also verify the CRC for the other
		 * "pages" of SPD data in blocks 1 and 2, but we'll do
		 * it some other time.
		 */
		return 1;
	}

	/* For unrecognized memory types, don't match at all */
	return 0;
}

void
spdmem_common_attach(struct spdmem_softc *sc, device_t self)
{
	struct spdmem *s = &(sc->sc_spd_data);
	const char *type;
	const char *rambus_rev = "Reserved";
	int dimm_size;
	unsigned int i, spd_len, spd_size;
	const struct sysctlnode *node = NULL;

	(sc->sc_read)(sc, 0, &s->sm_len);
	(sc->sc_read)(sc, 1, &s->sm_size);
	(sc->sc_read)(sc, 2, &s->sm_type);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * An even newer encoding with one byte holding both
		 * the used-size and capacity values
		 */
		spd_len = s->sm_len & 0x0f;
		spd_size = (s->sm_len >> 4) & 0x07;

		spd_len = spd_rom_sizes[spd_len];
		spd_size *= 512;

	} else if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
		/*
		 * FBDIMM and DDR3 (and probably all newer) have a different
		 * encoding of the SPD EEPROM used/total sizes
		 */
		spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
		switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			spd_len = 64;
			break;
		}
	} else {
		spd_size = 1 << s->sm_size;
		spd_len = s->sm_len;
		if (spd_len < 64)
			spd_len = 64;
	}
	if (spd_len > spd_size)
		spd_len = spd_size;
	if (spd_len > sizeof(struct spdmem))
		spd_len = sizeof(struct spdmem);
	for (i = 3; i < spd_len; i++)
		(sc->sc_read)(sc, i, &((uint8_t *)s)[i]);

	/*
	 * Setup our sysctl subtree, hw.spdmemN
	 */
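	/*
	 * The child nodes created below (spd_data, mem_type and, for
	 * most module types, size and speed) can be read from userland
	 * with sysctl(8), e.g. "sysctl hw.spdmem0.mem_type" for the
	 * first unit.
	 */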
	sc->sc_sysctl_log = NULL;
	sysctl_createv(&sc->sc_sysctl_log, 0, NULL, &node,
	    0, CTLTYPE_NODE,
	    device_xname(self), NULL, NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL);
	if (node != NULL && spd_len != 0)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRUCT, "spd_data",
		    SYSCTL_DESCR("raw spd data"), NULL,
		    0, s, spd_len,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	/*
	 * Decode and print key SPD contents
	 */
	if (IS_RAMBUS_TYPE) {
		if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
			type = "Rambus";
		else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
			type = "Direct Rambus";
		else
			type = "Rambus (unknown)";

		switch (s->sm_len) {
		case 0:
			rambus_rev = "Invalid";
			break;
		case 1:
			rambus_rev = "0.7";
			break;
		case 2:
			rambus_rev = "1.0";
			break;
		default:
			rambus_rev = "Reserved";
			break;
		}
	} else {
		if (s->sm_type < __arraycount(spdmem_basic_types))
			type = spdmem_basic_types[s->sm_type];
		else
			type = "unknown memory type";

		if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
		    s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
		    s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
			type =
			    spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
			type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
		}
		if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM &&
		    s->sm_ddr4.ddr4_mod_type <
		    __arraycount(spdmem_ddr4_module_types)) {
			type =
			    spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type];
		}
	}

	strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * The latest spec (DDR4 SPD Document Release 3) defines
		 * NVDIMM Hybrid only.
		 */
		if ((s->sm_ddr4.ddr4_hybrid)
		    && (s->sm_ddr4.ddr4_hybrid_media == 1))
			strlcat(sc->sc_type, " NVDIMM hybrid",
			    SPDMEM_TYPE_MAXLEN);
	}

	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRING, "mem_type",
		    SYSCTL_DESCR("memory module type"), NULL,
		    0, sc->sc_type, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (IS_RAMBUS_TYPE) {
		aprint_naive("\n");
		aprint_normal("\n");
		aprint_normal_dev(self, "%s, SPD Revision %s", type, rambus_rev);
		dimm_size = 1 << (s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13);
		if (dimm_size >= 1024)
			aprint_normal(", %dGB\n", dimm_size / 1024);
		else
			aprint_normal(", %dMB\n", dimm_size);

		/* No further decode for RAMBUS memory */
		return;
	}
	switch (s->sm_type) {
	case SPDMEM_MEMTYPE_EDO:
	case SPDMEM_MEMTYPE_FPM:
		decode_edofpm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_ROM:
		decode_rom(node, self, s);
		break;
	case SPDMEM_MEMTYPE_SDRAM:
		decode_sdram(node, self, s, spd_len);
		break;
	case SPDMEM_MEMTYPE_DDRSDRAM:
		decode_ddr(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR2SDRAM:
		decode_ddr2(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR3SDRAM:
		decode_ddr3(node, self, s);
		break;
	case SPDMEM_MEMTYPE_FBDIMM:
	case SPDMEM_MEMTYPE_FBDIMM_PROBE:
		decode_fbdimm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR4SDRAM:
		decode_ddr4(node, self, s);
		break;
	}

	/* Dump SPD */
	for (i = 0; i < spd_len; i += 16) {
		unsigned int j, k;
		aprint_debug_dev(self, "0x%02x:", i);
		k = (spd_len > (i + 16)) ? i + 16 : spd_len;
		for (j = i; j < k; j++)
			aprint_debug(" %02x", ((uint8_t *)s)[j]);
		aprint_debug("\n");
	}
}

int
spdmem_common_detach(struct spdmem_softc *sc, device_t self)
{
	sysctl_teardown(&sc->sc_sysctl_log);

	return 0;
}

static void
decode_size_speed(device_t self, const struct sysctlnode *node,
	int dimm_size, int cycle_time, int d_clk, int bits,
	bool round, const char *ddr_type_string, int speed)
{
	int p_clk;
	struct spdmem_softc *sc = device_private(self);

	if (dimm_size < 1024)
		aprint_normal("%dMB", dimm_size);
	else
		aprint_normal("%dGB", dimm_size / 1024);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "size",
		    SYSCTL_DESCR("module size in MB"), NULL,
		    dimm_size, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (cycle_time == 0) {
		aprint_normal("\n");
		return;
	}

	/*
	 * Calculate p_clk first, since for DDR3 we need maximum significance.
	 * DDR3 rating is not rounded to a multiple of 100.  This results in
	 * cycle_time of 1.5ns displayed as PC3-10666.
	 *
	 * For SDRAM, the speed is provided by the caller so we use it.
	 */
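	/*
	 * For example, DDR3 with a 1.5ns cycle_time (1500) on a 64-bit
	 * module gives p_clk = 2000000 * 64 / 8 / 1500 = 10666 and
	 * d_clk = 1333, printed as "1333MHz (PC3-10666)".  With rounding
	 * enabled, DDR2 at 3.0ns yields p_clk 5333, rounded down to
	 * PC2-5300.
	 */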
	d_clk *= 1000 * 1000;
	if (speed)
		p_clk = speed;
	else
		p_clk = (d_clk * bits) / 8 / cycle_time;
	d_clk = (d_clk + cycle_time / 2) / cycle_time;
	if (round) {
		if ((p_clk % 100) >= 50)
			p_clk += 50;
		p_clk -= p_clk % 100;
	}
	aprint_normal(", %dMHz (%s-%d)\n",
	    d_clk, ddr_type_string, p_clk);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "speed",
		    SYSCTL_DESCR("memory speed in MHz"),
		    NULL, d_clk, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
}

static void
decode_voltage_refresh(device_t self, struct spdmem *s)
{
	const char *voltage, *refresh;

	if (s->sm_voltage < __arraycount(spdmem_voltage_types))
		voltage = spdmem_voltage_types[s->sm_voltage];
	else
		voltage = "unknown";

	if (s->sm_refresh < __arraycount(spdmem_refresh_types))
		refresh = spdmem_refresh_types[s->sm_refresh];
	else
		refresh = "unknown";

	aprint_verbose_dev(self, "voltage %s, refresh time %s%s\n",
	    voltage, refresh,
	    s->sm_selfrefresh?" (self-refreshing)":"");
}

static void
decode_edofpm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
	    s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols, s->sm_fpm.fpm_banks,
	    s->sm_fpm.fpm_tRAC, s->sm_fpm.fpm_tCAC);
}

static void
decode_rom(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self, "%d rows, %d cols, %d banks\n",
	    s->sm_rom.rom_rows, s->sm_rom.rom_cols, s->sm_rom.rom_banks);
}

static void
decode_sdram(const struct sysctlnode *node, device_t self, struct spdmem *s,
	int spd_len)
{
	int dimm_size, cycle_time, bits, tAA, i, speed, freq;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_sdr.sdr_mod_attrs & SPDMEM_SDR_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

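	/*
	 * With the standard 64-bit data bus, each bank of each module
	 * rank holds 2^(rows+cols) words of 8 bytes, i.e.
	 * 2^(rows+cols+3-20) MB, which is where the "- 17" below (and
	 * in the DDR/DDR2 decoders) comes from.
	 */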
	dimm_size = 1 << (s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17);
	dimm_size *= s->sm_sdr.sdr_banks * s->sm_sdr.sdr_banks_per_chip;

	cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
	    s->sm_sdr.sdr_cycle_tenths * 100;
	bits = le16toh(s->sm_sdr.sdr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;

	/* Calculate speed here - from OpenBSD */
	if (spd_len >= 128)
		freq = ((uint8_t *)s)[126];
	else
		freq = 0;
	switch (freq) {
		/*
		 * Must check cycle time since some PC-133 DIMMs
		 * actually report PC-100
		 */
	case 100:
	case 133:
		if (cycle_time < 8000)
			speed = 133;
		else
			speed = 100;
		break;
	case 0x66:		/* Legacy DIMMs use _hex_ 66! */
	default:
		speed = 66;
	}
	decode_size_speed(self, node, dimm_size, cycle_time, 1, bits, FALSE,
	    "PC", speed);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols, s->sm_sdr.sdr_banks,
	    s->sm_sdr.sdr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000) / 100);

	tAA = 0;
	for (i = 0; i < 8; i++)
		if (s->sm_sdr.sdr_tCAS & (1 << i))
			tAA = i;
	tAA++;
	aprint_verbose_dev(self, LATENCY, tAA, s->sm_sdr.sdr_tRCD,
	    s->sm_sdr.sdr_tRP, s->sm_sdr.sdr_tRAS);

	decode_voltage_refresh(self, s);
}

static void
decode_ddr(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_ddr.ddr_mod_attrs & SPDMEM_DDR_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17);
	dimm_size *= s->sm_ddr.ddr_ranks * s->sm_ddr.ddr_banks_per_chip;

	cycle_time = s->sm_ddr.ddr_cycle_whole * 1000 +
	    spdmem_cycle_frac[s->sm_ddr.ddr_cycle_tenths];
	bits = le16toh(s->sm_ddr.ddr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols, s->sm_ddr.ddr_ranks,
	    s->sm_ddr.ddr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000 + 50) / 100);

	tAA = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr.ddr_tCAS & (1 << i))
			tAA = i;
	tAA /= 2;

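/*
 * tRCD and tRP are stored in units of 0.25ns and tRAS in whole ns, so
 * the scale factors of 250 and 1000 convert them to picoseconds before
 * rounding up to a whole number of clock cycles.
 */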
#define __DDR_ROUND(scale, field)					\
	((scale * s->sm_ddr.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR_ROUND(250, ddr_tRCD),
	    __DDR_ROUND(250, ddr_tRP), __DDR_ROUND(1000, ddr_tRAS));

#undef __DDR_ROUND

	decode_voltage_refresh(self, s);
}

static void
decode_ddr2(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_ddr2.ddr2_mod_attrs & SPDMEM_DDR2_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17);
	dimm_size *= (s->sm_ddr2.ddr2_ranks + 1) *
	    s->sm_ddr2.ddr2_banks_per_chip;

	cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 +
	    spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac];
	bits = s->sm_ddr2.ddr2_datawidth;
	if ((s->sm_config & 0x03) != 0)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%02dns cycle time\n",
	    s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols,
	    s->sm_ddr2.ddr2_ranks + 1, s->sm_ddr2.ddr2_banks_per_chip,
	    cycle_time / 1000, (cycle_time % 1000 + 5) / 10);

	tAA = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr2.ddr2_tCAS & (1 << i))
			tAA = i;

#define __DDR2_ROUND(scale, field)					\
	((scale * s->sm_ddr2.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR2_ROUND(250, ddr2_tRCD),
	    __DDR2_ROUND(250, ddr2_tRP), __DDR2_ROUND(1000, ddr2_tRAS));

#undef __DDR2_ROUND

	decode_voltage_refresh(self, s);
}

static void
print_part(const char *part, size_t pnsize)
{
	const char *p = memchr(part, ' ', pnsize);
	if (p == NULL)
		p = part + pnsize;
	aprint_normal(": %.*s\n", (int)(p - part), part);
}

static u_int
ddr3_value_pico(struct spdmem *s, uint8_t txx_mtb, uint8_t txx_ftb)
{
	u_int mtb, ftb;		/* in picoseconds */
	intmax_t signed_txx_ftb;
	u_int val;

	mtb = (u_int)s->sm_ddr3.ddr3_mtb_dividend * 1000 /
	    s->sm_ddr3.ddr3_mtb_divisor;
	ftb = (u_int)s->sm_ddr3.ddr3_ftb_dividend * 1000 /
	    s->sm_ddr3.ddr3_ftb_divisor;

	/* tXX_ftb is signed value */
	signed_txx_ftb = (int8_t)txx_ftb;
	val = txx_mtb * mtb +
	    ((txx_ftb > 127) ? signed_txx_ftb : txx_ftb) * ftb / 1000;

	return val;
}

#define __DDR3_VALUE_PICO(s, field)					\
	ddr3_value_pico(s, s->sm_ddr3.ddr3_##field##_mtb,		\
	    s->sm_ddr3.ddr3_##field##_ftb)
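/*
 * For example, with the usual medium timebase of 1/8 ns (mtb dividend 1,
 * divisor 8, i.e. 125ps) and a 1ps fine timebase, a tCKmin mtb count of
 * 12 works out to 12 * 125 = 1500ps, i.e. DDR3-1333.
 */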

static void
decode_ddr3(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	print_part(s->sm_ddr3.ddr3_part, sizeof(s->sm_ddr3.ddr3_part));
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	if (s->sm_ddr3.ddr3_mod_type ==
	    SPDMEM_DDR3_TYPE_MINI_RDIMM ||
	    s->sm_ddr3.ddr3_mod_type == SPDMEM_DDR3_TYPE_RDIMM)
		aprint_normal(" (registered)");
	aprint_normal(", %sECC, %stemp-sensor, ",
	    (s->sm_ddr3.ddr3_hasECC)?"":"no ",
	    (s->sm_ddr3.ddr3_has_therm_sensor)?"":"no ");

	/*
	 * DDR3 size specification is quite different from others
	 *
	 * Module capacity is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	external_bus_width / internal_bus_width
	 * We further divide by 2**20 to get our answer in MB
	 */
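	/*
	 * For example, 2 Gbit chips (ddr3_chipsize 3) in x8 organization
	 * (ddr3_chipwidth 1) on a 64-bit bus (ddr3_datawidth 3) with two
	 * ranks (ddr3_physbanks 1) give (31 - 20) - 3 + 6 - 3 = 11,
	 * i.e. 2048MB per rank and 4096MB in total.
	 */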
	dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
	    (s->sm_ddr3.ddr3_datawidth + 3) -
	    (s->sm_ddr3.ddr3_chipwidth + 2);
	dimm_size = (1 << dimm_size) * (s->sm_ddr3.ddr3_physbanks + 1);

	cycle_time = __DDR3_VALUE_PICO(s, tCKmin);
	bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, FALSE,
	    "PC3", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d log. banks, %d phys. banks, "
	    "%d.%03dns cycle time\n",
	    s->sm_ddr3.ddr3_rows + 12, s->sm_ddr3.ddr3_cols + 9,
	    1 << (s->sm_ddr3.ddr3_logbanks + 3),
	    s->sm_ddr3.ddr3_physbanks + 1,
	    cycle_time/1000, cycle_time % 1000);

#define __DDR3_CYCLES(val)						\
	((val / cycle_time) + ((val % cycle_time) ? 1 : 0))

	aprint_verbose_dev(self, LATENCY,
	    __DDR3_CYCLES(__DDR3_VALUE_PICO(s, tAAmin)),
	    __DDR3_CYCLES(__DDR3_VALUE_PICO(s, tRCDmin)),
	    __DDR3_CYCLES(__DDR3_VALUE_PICO(s, tRPmin)),
	    __DDR3_CYCLES((s->sm_ddr3.ddr3_tRAS_msb * 256
		+ s->sm_ddr3.ddr3_tRAS_lsb) * s->sm_ddr3.ddr3_mtb_dividend
		/ s->sm_ddr3.ddr3_mtb_divisor * 1000));

#undef __DDR3_CYCLES

	/* For DDR3, Voltage is written in another area */
	if (!s->sm_ddr3.ddr3_NOT15V || s->sm_ddr3.ddr3_135V
	    || s->sm_ddr3.ddr3_125V) {
		aprint_verbose("%s:", device_xname(self));
		if (!s->sm_ddr3.ddr3_NOT15V)
			aprint_verbose(" 1.5V");
		if (s->sm_ddr3.ddr3_135V)
			aprint_verbose(" 1.35V");
		if (s->sm_ddr3.ddr3_125V)
			aprint_verbose(" 1.25V");
		aprint_verbose(" operable\n");
	}
}

static void
decode_fbdimm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	/*
	 * FB-DIMM module size calculation is very much like DDR3
	 */
	dimm_size = s->sm_fbd.fbdimm_rows + 12 +
	    s->sm_fbd.fbdimm_cols + 9 - 20 - 3;
	dimm_size = (1 << dimm_size) * (1 << (s->sm_fbd.fbdimm_banks + 2));

	cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
	    (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
	    s->sm_fbd.fbdimm_mtb_divisor;
	bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d.%02dns cycle time\n",
	    s->sm_fbd.fbdimm_rows, s->sm_fbd.fbdimm_cols,
	    1 << (s->sm_fbd.fbdimm_banks + 2),
	    cycle_time / 1000, (cycle_time % 1000 + 5) / 10);

#define __FBDIMM_CYCLES(field) (s->sm_fbd.field / s->sm_fbd.fbdimm_tCKmin)

	aprint_verbose_dev(self, LATENCY, __FBDIMM_CYCLES(fbdimm_tAAmin),
	    __FBDIMM_CYCLES(fbdimm_tRCDmin), __FBDIMM_CYCLES(fbdimm_tRPmin),
	    (s->sm_fbd.fbdimm_tRAS_msb * 256 + s->sm_fbd.fbdimm_tRAS_lsb) /
	    s->sm_fbd.fbdimm_tCKmin);

#undef __FBDIMM_CYCLES

	decode_voltage_refresh(self, s);
}

static void
decode_ddr4(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, ranks;
	int tAA_clocks, tRCD_clocks, tRP_clocks, tRAS_clocks;

	aprint_naive("\n");
	print_part(s->sm_ddr4.ddr4_part_number,
	    sizeof(s->sm_ddr4.ddr4_part_number));
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
	if (s->sm_ddr4.ddr4_mod_type < __arraycount(spdmem_ddr4_module_types))
		aprint_normal(" (%s)",
		    spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type]);
	aprint_normal(", %sECC, %stemp-sensor, ",
	    (s->sm_ddr4.ddr4_bus_width_extension) ? "" : "no ",
	    (s->sm_ddr4.ddr4_has_therm_sensor) ? "" : "no ");

	/*
	 * DDR4 size calculation from JEDEC spec
	 *
	 * Module capacity in bytes is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	primary_bus_width / DRAM_width *
	 *	logical_ranks_per_DIMM
	 *
	 * logical_ranks_per_DIMM equals package_ranks, but multiply
	 * by diecount for 3DS packages
	 *
	 * We further divide by 2**20 to get our answer in MB
	 */
	dimm_size = (s->sm_ddr4.ddr4_capacity + 28)	/* chip_capacity */
	    - 20					/* convert to MB */
	    - 3						/* bits --> bytes */
	    + (s->sm_ddr4.ddr4_primary_bus_width + 3);	/* bus width */
	switch (s->sm_ddr4.ddr4_device_width) {		/* DRAM width */
	case 0:	dimm_size -= 2;
		break;
	case 1:	dimm_size -= 3;
		break;
	case 2:	dimm_size -= 4;
		break;
	case 4:	dimm_size -= 5;
		break;
	default:
		dimm_size = -1;		/* flag invalid value */
	}
	if (dimm_size >= 0) {
		dimm_size = (1 << dimm_size) *
		    (s->sm_ddr4.ddr4_package_ranks + 1); /* log.ranks/DIMM */
		if (s->sm_ddr4.ddr4_signal_loading == 2) {
			dimm_size *= (s->sm_ddr4.ddr4_diecount + 1);
		}
	}

	/*
	 * Note that the ddr4_xxx_ftb fields are actually signed offsets from
	 * the corresponding mtb value, so we might have to subtract 256!
	 */
#define __DDR4_VALUE(field)	((s->sm_ddr4.ddr4_##field##_mtb * 125 +	\
	    s->sm_ddr4.ddr4_##field##_ftb) -				\
	    ((s->sm_ddr4.ddr4_##field##_ftb > 127)?256:0))
	/*
	 * For now, the only value for mtb is 0 = 125ps, and ftb = 1ps
	 * so we don't need to figure out the time-base units - just
	 * hard-code them for now.
	 */
	cycle_time = __DDR4_VALUE(tCKAVGmin);
	decode_size_speed(self, node, dimm_size, cycle_time, 2,
	    1 << (s->sm_ddr4.ddr4_primary_bus_width + 3),
	    TRUE, "PC4", 0);

	ranks = s->sm_ddr4.ddr4_package_ranks + 1;
	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks%s, %d banks/group, %d bank groups\n",
	    s->sm_ddr4.ddr4_rows + 12, s->sm_ddr4.ddr4_cols + 9,
	    ranks, (ranks > 1) ? ((s->sm_ddr4.ddr4_rank_mix == 1)
		? " (asymmetric)" : " (symmetric)") : "",
	    1 << (2 + s->sm_ddr4.ddr4_logbanks),
	    1 << s->sm_ddr4.ddr4_bankgroups);

	aprint_verbose_dev(self, "%d.%03dns cycle time\n",
	    cycle_time / 1000, cycle_time % 1000);

	tAA_clocks = __DDR4_VALUE(tAAmin) * 1000 / cycle_time;
	tRCD_clocks = __DDR4_VALUE(tRCDmin) * 1000 / cycle_time;
	tRP_clocks = __DDR4_VALUE(tRPmin) * 1000 / cycle_time;
	tRAS_clocks = (s->sm_ddr4.ddr4_tRASmin_msb * 256 +
	    s->sm_ddr4.ddr4_tRASmin_lsb) * 125 * 1000 / cycle_time;

	/*
	 * Per JEDEC spec, rounding is done by taking the time value,
	 * dividing by the cycle time, subtracting .010 from the result,
	 * and then rounding up to the nearest integer.  Unfortunately,
	 * none of their examples say what to do when the result of the
	 * subtraction is already an integer.  For now, assume that we
	 * still round up (so an interval of exactly 12.010 clock cycles
	 * will be printed as 13).
	 */
#define __DDR4_ROUND(value)	((value - 10) / 1000 + 1)

	aprint_verbose_dev(self, LATENCY, __DDR4_ROUND(tAA_clocks),
	    __DDR4_ROUND(tRCD_clocks),
	    __DDR4_ROUND(tRP_clocks),
	    __DDR4_ROUND(tRAS_clocks));

#undef __DDR4_VALUE
#undef __DDR4_ROUND
}