1 /* $NetBSD: spdmem.c,v 1.36 2022/01/29 08:14:24 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2007 Nicolas Joly
5 * Copyright (c) 2007 Paul Goyette
6 * Copyright (c) 2007 Tobias Nygren
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Serial Presence Detect (SPD) memory identification
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: spdmem.c,v 1.36 2022/01/29 08:14:24 msaitoh Exp $");
39
40 #include <sys/param.h>
41 #include <sys/device.h>
42 #include <sys/endian.h>
43 #include <sys/sysctl.h>
44 #include <machine/bswap.h>
45
46 #include <dev/i2c/i2cvar.h>
47 #include <dev/ic/spdmemreg.h>
48 #include <dev/ic/spdmemvar.h>
49
50 /* Routines for decoding spd data */
51 static void decode_edofpm(const struct sysctlnode *, device_t, struct spdmem *);
52 static void decode_rom(const struct sysctlnode *, device_t, struct spdmem *);
53 static void decode_sdram(const struct sysctlnode *, device_t, struct spdmem *,
54 int);
55 static void decode_ddr(const struct sysctlnode *, device_t, struct spdmem *);
56 static void decode_ddr2(const struct sysctlnode *, device_t, struct spdmem *);
57 static void decode_ddr3(const struct sysctlnode *, device_t, struct spdmem *);
58 static void decode_ddr4(const struct sysctlnode *, device_t, struct spdmem *);
59 static void decode_fbdimm(const struct sysctlnode *, device_t, struct spdmem *);
60
61 static void decode_size_speed(device_t, const struct sysctlnode *,
62 int, int, int, int, bool, const char *, int);
63 static void decode_voltage_refresh(device_t, struct spdmem *);
64
65 #define IS_RAMBUS_TYPE (s->sm_len < 4)
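/*
 * Rambus-style SPD stores the SPD revision in byte 0 (a value of 0-2),
 * whereas JEDEC SPD uses byte 0 for the bytes-written/length encoding,
 * which is always far larger in practice; a small byte 0 is therefore
 * taken to mean a Rambus module.
 */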
66
67 static const char* const spdmem_basic_types[] = {
68 "unknown",
69 "FPM",
70 "EDO",
71 "Pipelined Nibble",
72 "SDRAM",
73 "ROM",
74 "DDR SGRAM",
75 "DDR SDRAM",
76 "DDR2 SDRAM",
77 "DDR2 SDRAM FB",
78 "DDR2 SDRAM FB Probe",
79 "DDR3 SDRAM",
80 "DDR4 SDRAM",
81 "unknown",
82 "DDR4E SDRAM",
83 "LPDDR3 SDRAM",
84 	"LPDDR4 SDRAM",
85 	"LPDDR4X SDRAM",
86 "DDR5 SDRAM",
87 };
88
89 static const char* const spdmem_ddr4_module_types[] = {
90 "DDR4 Extended",
91 "DDR4 RDIMM",
92 "DDR4 UDIMM",
93 "DDR4 SO-DIMM",
94 "DDR4 Load-Reduced DIMM",
95 "DDR4 Mini-RDIMM",
96 "DDR4 Mini-UDIMM",
97 "DDR4 Reserved",
98 "DDR4 72Bit SO-RDIMM",
99 "DDR4 72Bit SO-UDIMM",
100 "DDR4 Undefined",
101 "DDR4 Reserved",
102 "DDR4 16Bit SO-DIMM",
103 "DDR4 32Bit SO-DIMM",
104 "DDR4 Reserved",
105 "DDR4 Undefined"
106 };
107
108 static const char* const spdmem_superset_types[] = {
109 "unknown",
110 "ESDRAM",
111 "DDR ESDRAM",
112 "PEM EDO",
113 "PEM SDRAM"
114 };
115
116 static const char* const spdmem_voltage_types[] = {
117 "TTL (5V tolerant)",
118 "LvTTL (not 5V tolerant)",
119 "HSTL 1.5V",
120 "SSTL 3.3V",
121 "SSTL 2.5V",
122 "SSTL 1.8V"
123 };
124
125 static const char* const spdmem_refresh_types[] = {
126 "15.625us",
127 "3.9us",
128 "7.8us",
129 "31.3us",
130 "62.5us",
131 "125us"
132 };
133
134 static const char* const spdmem_parity_types[] = {
135 "no parity or ECC",
136 "data parity",
137 "data ECC",
138 "data parity and ECC",
139 "cmd/addr parity",
140 "cmd/addr/data parity",
141 "cmd/addr parity, data ECC",
142 "cmd/addr/data parity, data ECC"
143 };
144
145 int spd_rom_sizes[] = { 0, 128, 256, 384, 512 };
146
147
148 /* Cycle time fractional values (units of .001 ns) for DDR and DDR2 SDRAM */
149 static const uint16_t spdmem_cycle_frac[] = {
150 0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
151 250, 333, 667, 750, 999, 999
152 };
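/*
 * The table above mirrors the JEDEC encoding of the fractional-cycle-time
 * nibble: 0x0-0x9 are tenths of a nanosecond, 0xA-0xD encode 0.25, 0.33,
 * 0.667 and 0.75 ns, and the reserved codes 0xE/0xF are clamped to 0.999 ns.
 */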
153
154 /* Format string for timing info */
155 #define LATENCY "tAA-tRCD-tRP-tRAS: %d-%d-%d-%d\n"
156
157 /* CRC functions used for certain memory types */
158
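/*
 * FB-DIMM, DDR3 and DDR4 SPD protect the lower SPD block with a CRC-16.
 * spdcrc16() computes the usual MSB-first CRC-16 with polynomial 0x1021
 * and an initial value of 0 over bytes 0..count inclusive; the expected
 * value is stored little-endian in SPD bytes 126 (LSB) and 127 (MSB).
 */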
159 static uint16_t
160 spdcrc16(struct spdmem_softc *sc, int count)
161 {
162 uint16_t crc;
163 int i, j;
164 uint8_t val;
165 crc = 0;
166 for (j = 0; j <= count; j++) {
167 (sc->sc_read)(sc, j, &val);
168 crc = crc ^ val << 8;
169 for (i = 0; i < 8; ++i)
170 if (crc & 0x8000)
171 crc = crc << 1 ^ 0x1021;
172 else
173 crc = crc << 1;
174 }
175 return (crc & 0xFFFF);
176 }
177
178 int
179 spdmem_common_probe(struct spdmem_softc *sc)
180 {
181 int cksum = 0;
182 uint8_t i, val, spd_type;
183 int spd_len, spd_crc_cover;
184 uint16_t crc_calc, crc_spd;
185
186 /* Read failed means a device doesn't exist */
187 if ((sc->sc_read)(sc, 2, &spd_type) != 0)
188 return 0;
189
190 /* Memory type should not be 0 */
191 if (spd_type == 0x00)
192 return 0;
193
194 /* For older memory types, validate the checksum over 1st 63 bytes */
195 if (spd_type <= SPDMEM_MEMTYPE_DDR2SDRAM) {
196 for (i = 0; i < 63; i++) {
197 (sc->sc_read)(sc, i, &val);
198 cksum += val;
199 }
200
201 (sc->sc_read)(sc, 63, &val);
202
203 if ((cksum & 0xff) != val) {
204 aprint_debug("spd checksum failed, calc = 0x%02x, "
205 "spd = 0x%02x\n", cksum, val);
206 return 0;
207 } else
208 return 1;
209 }
210
211 /* For DDR3 and FBDIMM, verify the CRC */
212 else if (spd_type <= SPDMEM_MEMTYPE_DDR3SDRAM) {
213 (sc->sc_read)(sc, 0, &val);
214 spd_len = val;
215 if (spd_len & SPDMEM_SPDCRC_116)
216 spd_crc_cover = 116;
217 else
218 spd_crc_cover = 125;
219 switch (spd_len & SPDMEM_SPDLEN_MASK) {
220 case SPDMEM_SPDLEN_128:
221 spd_len = 128;
222 break;
223 case SPDMEM_SPDLEN_176:
224 spd_len = 176;
225 break;
226 case SPDMEM_SPDLEN_256:
227 spd_len = 256;
228 break;
229 default:
230 return 0;
231 }
232 if (spd_crc_cover > spd_len)
233 return 0;
234 crc_calc = spdcrc16(sc, spd_crc_cover);
235 (sc->sc_read)(sc, 127, &val);
236 crc_spd = val << 8;
237 (sc->sc_read)(sc, 126, &val);
238 crc_spd |= val;
239 if (crc_calc != crc_spd) {
240 aprint_debug("crc16 failed, covers %d bytes, "
241 "calc = 0x%04x, spd = 0x%04x\n",
242 spd_crc_cover, crc_calc, crc_spd);
243 return 0;
244 }
245 return 1;
246 } else if (spd_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
247 (sc->sc_read)(sc, 0, &val);
248 spd_len = val & 0x0f;
249 if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
250 return 0;
251 spd_len = spd_rom_sizes[spd_len];
252 spd_crc_cover = 125; /* For byte 0 to 125 */
253 if (spd_crc_cover > spd_len)
254 return 0;
255 crc_calc = spdcrc16(sc, spd_crc_cover);
256 (sc->sc_read)(sc, 127, &val);
257 crc_spd = val << 8;
258 (sc->sc_read)(sc, 126, &val);
259 crc_spd |= val;
260 if (crc_calc != crc_spd) {
261 aprint_debug("crc16 failed, covers %d bytes, "
262 "calc = 0x%04x, spd = 0x%04x\n",
263 spd_crc_cover, crc_calc, crc_spd);
264 return 0;
265 }
266 /*
267 * We probably could also verify the CRC for the other
268 * "pages" of SPD data in blocks 1 and 2, but we'll do
269 * it some other time.
270 */
271 return 1;
272 }
273
274 /* For unrecognized memory types, don't match at all */
275 return 0;
276 }
277
278 void
279 spdmem_common_attach(struct spdmem_softc *sc, device_t self)
280 {
281 struct spdmem *s = &(sc->sc_spd_data);
282 const char *type;
283 const char *rambus_rev = "Reserved";
284 int dimm_size;
285 unsigned int i, spd_len, spd_size;
286 const struct sysctlnode *node = NULL;
287
288 (sc->sc_read)(sc, 0, &s->sm_len);
289 (sc->sc_read)(sc, 1, &s->sm_size);
290 (sc->sc_read)(sc, 2, &s->sm_type);
291
292 if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
293 /*
294 * An even newer encoding with one byte holding both
295 * the used-size and capacity values
296 */
297 spd_len = s->sm_len & 0x0f;
298 spd_size = (s->sm_len >> 4) & 0x07;
299
300 spd_len = spd_rom_sizes[spd_len];
301 spd_size *= 512;
302
303 } else if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
304 /*
305 * FBDIMM and DDR3 (and probably all newer) have a different
306 * encoding of the SPD EEPROM used/total sizes
307 */
308 spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
309 switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
310 case SPDMEM_SPDLEN_128:
311 spd_len = 128;
312 break;
313 case SPDMEM_SPDLEN_176:
314 spd_len = 176;
315 break;
316 case SPDMEM_SPDLEN_256:
317 spd_len = 256;
318 break;
319 default:
320 spd_len = 64;
321 break;
322 }
323 } else {
324 spd_size = 1 << s->sm_size;
325 spd_len = s->sm_len;
326 if (spd_len < 64)
327 spd_len = 64;
328 }
329 if (spd_len > spd_size)
330 spd_len = spd_size;
331 if (spd_len > sizeof(struct spdmem))
332 spd_len = sizeof(struct spdmem);
333 for (i = 3; i < spd_len; i++)
334 (sc->sc_read)(sc, i, &((uint8_t *)s)[i]);
335
336 /*
337 * Setup our sysctl subtree, hw.spdmemN
338 */
339 sc->sc_sysctl_log = NULL;
340 sysctl_createv(&sc->sc_sysctl_log, 0, NULL, &node,
341 0, CTLTYPE_NODE,
342 device_xname(self), NULL, NULL, 0, NULL, 0,
343 CTL_HW, CTL_CREATE, CTL_EOL);
344 if (node != NULL && spd_len != 0)
345 sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
346 0,
347 CTLTYPE_STRUCT, "spd_data",
348 SYSCTL_DESCR("raw spd data"), NULL,
349 0, s, spd_len,
350 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
351
352 /*
353 * Decode and print key SPD contents
354 */
355 if (IS_RAMBUS_TYPE) {
356 if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
357 type = "Rambus";
358 else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
359 type = "Direct Rambus";
360 else
361 type = "Rambus (unknown)";
362
363 switch (s->sm_len) {
364 case 0:
365 rambus_rev = "Invalid";
366 break;
367 case 1:
368 rambus_rev = "0.7";
369 break;
370 case 2:
371 rambus_rev = "1.0";
372 break;
373 default:
374 rambus_rev = "Reserved";
375 break;
376 }
377 } else {
378 if (s->sm_type < __arraycount(spdmem_basic_types))
379 type = spdmem_basic_types[s->sm_type];
380 else
381 type = "unknown memory type";
382
383 if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
384 s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
385 type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
386 if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
387 s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
388 type = spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
389 if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
390 s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
391 type =
392 spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
393 if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
394 s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
395 type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
396 }
397 if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM &&
398 s->sm_ddr4.ddr4_mod_type <
399 __arraycount(spdmem_ddr4_module_types)) {
400 type = spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type];
401 }
402 }
403
404 strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);
405
406 if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
407 /*
408 * The latest spec (DDR4 SPD Document Release 3) defines
409 * NVDIMM Hybrid only.
410 */
411 if ((s->sm_ddr4.ddr4_hybrid)
412 && (s->sm_ddr4.ddr4_hybrid_media == 1))
413 strlcat(sc->sc_type, " NVDIMM hybrid",
414 SPDMEM_TYPE_MAXLEN);
415 }
416
417 if (node != NULL)
418 sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
419 0,
420 CTLTYPE_STRING, "mem_type",
421 SYSCTL_DESCR("memory module type"), NULL,
422 0, sc->sc_type, 0,
423 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
424
425 if (IS_RAMBUS_TYPE) {
426 aprint_naive("\n");
427 aprint_normal("\n");
428 aprint_normal_dev(self, "%s, SPD Revision %s", type, rambus_rev);
429 dimm_size = 1 << (s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13);
430 if (dimm_size >= 1024)
431 aprint_normal(", %dGB\n", dimm_size / 1024);
432 else
433 aprint_normal(", %dMB\n", dimm_size);
434
435 /* No further decode for RAMBUS memory */
436 return;
437 }
438 switch (s->sm_type) {
439 case SPDMEM_MEMTYPE_EDO:
440 case SPDMEM_MEMTYPE_FPM:
441 decode_edofpm(node, self, s);
442 break;
443 case SPDMEM_MEMTYPE_ROM:
444 decode_rom(node, self, s);
445 break;
446 case SPDMEM_MEMTYPE_SDRAM:
447 decode_sdram(node, self, s, spd_len);
448 break;
449 case SPDMEM_MEMTYPE_DDRSDRAM:
450 decode_ddr(node, self, s);
451 break;
452 case SPDMEM_MEMTYPE_DDR2SDRAM:
453 decode_ddr2(node, self, s);
454 break;
455 case SPDMEM_MEMTYPE_DDR3SDRAM:
456 decode_ddr3(node, self, s);
457 break;
458 case SPDMEM_MEMTYPE_FBDIMM:
459 case SPDMEM_MEMTYPE_FBDIMM_PROBE:
460 decode_fbdimm(node, self, s);
461 break;
462 case SPDMEM_MEMTYPE_DDR4SDRAM:
463 decode_ddr4(node, self, s);
464 break;
465 }
466
467 /* Dump SPD */
468 for (i = 0; i < spd_len; i += 16) {
469 unsigned int j, k;
470 aprint_debug_dev(self, "0x%02x:", i);
471 k = (spd_len > (i + 16)) ? i + 16 : spd_len;
472 for (j = i; j < k; j++)
473 aprint_debug(" %02x", ((uint8_t *)s)[j]);
474 aprint_debug("\n");
475 }
476 }
477
478 int
479 spdmem_common_detach(struct spdmem_softc *sc, device_t self)
480 {
481 sysctl_teardown(&sc->sc_sysctl_log);
482
483 return 0;
484 }
485
486 static void
487 decode_size_speed(device_t self, const struct sysctlnode *node,
488 int dimm_size, int cycle_time, int d_clk, int bits,
489 bool round, const char *ddr_type_string, int speed)
490 {
491 int p_clk;
492 struct spdmem_softc *sc = device_private(self);
493
494 if (dimm_size < 1024)
495 aprint_normal("%dMB", dimm_size);
496 else
497 aprint_normal("%dGB", dimm_size / 1024);
498 if (node != NULL)
499 sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
500 CTLFLAG_IMMEDIATE,
501 CTLTYPE_INT, "size",
502 SYSCTL_DESCR("module size in MB"), NULL,
503 dimm_size, NULL, 0,
504 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
505
506 if (cycle_time == 0) {
507 aprint_normal("\n");
508 return;
509 }
510
511 /*
512 * Calculate p_clk first, since for DDR3 we need maximum significance.
513 * DDR3 rating is not rounded to a multiple of 100. This results in
514 * cycle_time of 1.5ns displayed as PC3-10666.
515 *
516 * For SDRAM, the speed is provided by the caller so we use it.
517 */
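	/*
	 * Worked example (DDR3): cycle_time = 1500 (1.5ns), d_clk = 2,
	 * bits = 64 and speed = 0 give p_clk = (2000000 * 64) / 8 / 1500
	 * = 10666 and d_clk = 1333, printed as "1333MHz (PC3-10666)".
	 */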
518 d_clk *= 1000 * 1000;
519 if (speed)
520 p_clk = speed;
521 else
522 p_clk = (d_clk * bits) / 8 / cycle_time;
523 	d_clk = (d_clk + cycle_time / 2) / cycle_time;
524 if (round) {
525 if ((p_clk % 100) >= 50)
526 p_clk += 50;
527 p_clk -= p_clk % 100;
528 }
529 aprint_normal(", %dMHz (%s-%d)\n",
530 d_clk, ddr_type_string, p_clk);
531 if (node != NULL)
532 sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
533 CTLFLAG_IMMEDIATE,
534 CTLTYPE_INT, "speed",
535 SYSCTL_DESCR("memory speed in MHz"),
536 NULL, d_clk, NULL, 0,
537 CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
538 }
539
540 static void
541 decode_voltage_refresh(device_t self, struct spdmem *s)
542 {
543 const char *voltage, *refresh;
544
545 if (s->sm_voltage < __arraycount(spdmem_voltage_types))
546 voltage = spdmem_voltage_types[s->sm_voltage];
547 else
548 voltage = "unknown";
549
550 if (s->sm_refresh < __arraycount(spdmem_refresh_types))
551 refresh = spdmem_refresh_types[s->sm_refresh];
552 else
553 refresh = "unknown";
554
555 aprint_verbose_dev(self, "voltage %s, refresh time %s%s\n",
556 voltage, refresh,
557 s->sm_selfrefresh?" (self-refreshing)":"");
558 }
559
560 static void
561 decode_edofpm(const struct sysctlnode *node, device_t self, struct spdmem *s)
562 {
563
564 aprint_naive("\n");
565 aprint_normal("\n");
566 aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
567
568 aprint_normal("\n");
569 aprint_verbose_dev(self,
570 "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
571 s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols, s->sm_fpm.fpm_banks,
572 s->sm_fpm.fpm_tRAC, s->sm_fpm.fpm_tCAC);
573 }
574
575 static void
576 decode_rom(const struct sysctlnode *node, device_t self, struct spdmem *s)
577 {
578
579 aprint_naive("\n");
580 aprint_normal("\n");
581 aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
582
583 aprint_normal("\n");
584 aprint_verbose_dev(self, "%d rows, %d cols, %d banks\n",
585 s->sm_rom.rom_rows, s->sm_rom.rom_cols, s->sm_rom.rom_banks);
586 }
587
588 static void
589 decode_sdram(const struct sysctlnode *node, device_t self, struct spdmem *s,
590 int spd_len)
591 {
592 int dimm_size, cycle_time, bits, tAA, i, speed, freq;
593
594 aprint_naive("\n");
595 aprint_normal("\n");
596 aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
597
598 aprint_normal("%s, %s, ",
599 (s->sm_sdr.sdr_mod_attrs & SPDMEM_SDR_MASK_REG)?
600 " (registered)":"",
601 (s->sm_config < __arraycount(spdmem_parity_types))?
602 spdmem_parity_types[s->sm_config]:"invalid parity");
603
604 dimm_size = 1 << (s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17);
605 dimm_size *= s->sm_sdr.sdr_banks * s->sm_sdr.sdr_banks_per_chip;
606
607 cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
608 s->sm_sdr.sdr_cycle_tenths * 100;
609 bits = le16toh(s->sm_sdr.sdr_datawidth);
610 if (s->sm_config == 1 || s->sm_config == 2)
611 bits -= 8;
612
613 /* Calculate speed here - from OpenBSD */
614 if (spd_len >= 128)
615 freq = ((uint8_t *)s)[126];
616 else
617 freq = 0;
618 switch (freq) {
619 /*
620 * Must check cycle time since some PC-133 DIMMs
621 * actually report PC-100
622 */
623 case 100:
624 case 133:
625 if (cycle_time < 8000)
626 speed = 133;
627 else
628 speed = 100;
629 break;
630 case 0x66: /* Legacy DIMMs use _hex_ 66! */
631 default:
632 speed = 66;
633 }
634 decode_size_speed(self, node, dimm_size, cycle_time, 1, bits, FALSE,
635 "PC", speed);
636
637 aprint_verbose_dev(self,
638 "%d rows, %d cols, %d banks, %d banks/chip, %d.%dns cycle time\n",
639 s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols, s->sm_sdr.sdr_banks,
640 s->sm_sdr.sdr_banks_per_chip, cycle_time/1000,
641 (cycle_time % 1000) / 100);
642
643 tAA = 0;
644 for (i = 0; i < 8; i++)
645 if (s->sm_sdr.sdr_tCAS & (1 << i))
646 tAA = i;
647 tAA++;
648 aprint_verbose_dev(self, LATENCY, tAA, s->sm_sdr.sdr_tRCD,
649 s->sm_sdr.sdr_tRP, s->sm_sdr.sdr_tRAS);
650
651 decode_voltage_refresh(self, s);
652 }
653
654 static void
655 decode_ddr(const struct sysctlnode *node, device_t self, struct spdmem *s)
656 {
657 int dimm_size, cycle_time, bits, tAA, i;
658
659 aprint_naive("\n");
660 aprint_normal("\n");
661 aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
662
663 aprint_normal("%s, %s, ",
664 (s->sm_ddr.ddr_mod_attrs & SPDMEM_DDR_MASK_REG)?
665 " (registered)":"",
666 (s->sm_config < __arraycount(spdmem_parity_types))?
667 spdmem_parity_types[s->sm_config]:"invalid parity");
668
669 dimm_size = 1 << (s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17);
670 dimm_size *= s->sm_ddr.ddr_ranks * s->sm_ddr.ddr_banks_per_chip;
671
672 cycle_time = s->sm_ddr.ddr_cycle_whole * 1000 +
673 spdmem_cycle_frac[s->sm_ddr.ddr_cycle_tenths];
674 bits = le16toh(s->sm_ddr.ddr_datawidth);
675 if (s->sm_config == 1 || s->sm_config == 2)
676 bits -= 8;
677 decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
678 "PC", 0);
679
680 aprint_verbose_dev(self,
681 "%d rows, %d cols, %d ranks, %d banks/chip, %d.%dns cycle time\n",
682 s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols, s->sm_ddr.ddr_ranks,
683 s->sm_ddr.ddr_banks_per_chip, cycle_time/1000,
684 (cycle_time % 1000 + 50) / 100);
685
686 tAA = 0;
687 for (i = 2; i < 8; i++)
688 if (s->sm_ddr.ddr_tCAS & (1 << i))
689 tAA = i;
690 tAA /= 2;
691
692 #define __DDR_ROUND(scale, field) \
693 ((scale * s->sm_ddr.field + cycle_time - 1) / cycle_time)
694
695 aprint_verbose_dev(self, LATENCY, tAA, __DDR_ROUND(250, ddr_tRCD),
696 __DDR_ROUND(250, ddr_tRP), __DDR_ROUND(1000, ddr_tRAS));
697
698 #undef __DDR_ROUND
699
700 decode_voltage_refresh(self, s);
701 }
702
703 static void
704 decode_ddr2(const struct sysctlnode *node, device_t self, struct spdmem *s)
705 {
706 int dimm_size, cycle_time, bits, tAA, i;
707
708 aprint_naive("\n");
709 aprint_normal("\n");
710 aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
711
712 aprint_normal("%s, %s, ",
713 (s->sm_ddr2.ddr2_mod_attrs & SPDMEM_DDR2_MASK_REG)?
714 " (registered)":"",
715 (s->sm_config < __arraycount(spdmem_parity_types))?
716 spdmem_parity_types[s->sm_config]:"invalid parity");
717
718 dimm_size = 1 << (s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17);
719 dimm_size *= (s->sm_ddr2.ddr2_ranks + 1) *
720 s->sm_ddr2.ddr2_banks_per_chip;
721
722 cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 +
723 spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac];
724 bits = s->sm_ddr2.ddr2_datawidth;
725 if ((s->sm_config & 0x03) != 0)
726 bits -= 8;
727 decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
728 "PC2", 0);
729
730 aprint_verbose_dev(self,
731 "%d rows, %d cols, %d ranks, %d banks/chip, %d.%02dns cycle time\n",
732 s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols,
733 s->sm_ddr2.ddr2_ranks + 1, s->sm_ddr2.ddr2_banks_per_chip,
734 cycle_time / 1000, (cycle_time % 1000 + 5) /10 );
735
736 tAA = 0;
737 for (i = 2; i < 8; i++)
738 if (s->sm_ddr2.ddr2_tCAS & (1 << i))
739 tAA = i;
740
741 #define __DDR2_ROUND(scale, field) \
742 ((scale * s->sm_ddr2.field + cycle_time - 1) / cycle_time)
743
744 aprint_verbose_dev(self, LATENCY, tAA, __DDR2_ROUND(250, ddr2_tRCD),
745 __DDR2_ROUND(250, ddr2_tRP), __DDR2_ROUND(1000, ddr2_tRAS));
746
747 #undef	__DDR2_ROUND
748
749 decode_voltage_refresh(self, s);
750 }
751
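/*
 * SPD part-number fields are fixed-width and padded with spaces, so
 * print only up to the first space (if any).
 */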
752 static void
753 print_part(const char *part, size_t pnsize)
754 {
755 const char *p = memchr(part, ' ', pnsize);
756 if (p == NULL)
757 p = part + pnsize;
758 aprint_normal(": %.*s\n", (int)(p - part), part);
759 }
760
761 static u_int
762 ddr3_value_pico(struct spdmem *s, uint8_t txx_mtb, uint8_t txx_ftb)
763 {
764 u_int mtb, ftb; /* in picoseconds */
765 intmax_t signed_txx_ftb;
766 u_int val;
767
768 mtb = (u_int)s->sm_ddr3.ddr3_mtb_dividend * 1000 /
769 s->sm_ddr3.ddr3_mtb_divisor;
770 ftb = (u_int)s->sm_ddr3.ddr3_ftb_dividend * 1000 /
771 s->sm_ddr3.ddr3_ftb_divisor;
772
773 /* tXX_ftb is signed value */
774 signed_txx_ftb = (int8_t)txx_ftb;
775 val = txx_mtb * mtb +
776 ((txx_ftb > 127) ? signed_txx_ftb : txx_ftb) * ftb / 1000;
777
778 return val;
779 }
780
781 #define __DDR3_VALUE_PICO(s, field) \
782 ddr3_value_pico(s, s->sm_ddr3.ddr3_##field##_mtb, \
783 s->sm_ddr3.ddr3_##field##_ftb)
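/*
 * On typical DDR3 modules the medium timebase is 1/8 ns (mtb dividend 1,
 * divisor 8, i.e. 125ps) and the fine timebase is 1ps, so, for example,
 * tCKmin_mtb = 12 with tCKmin_ftb = 0 yields 1500ps (DDR3-1333).
 */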
784
785 static void
786 decode_ddr3(const struct sysctlnode *node, device_t self, struct spdmem *s)
787 {
788 int dimm_size, cycle_time, bits;
789
790 aprint_naive("\n");
791 print_part(s->sm_ddr3.ddr3_part, sizeof(s->sm_ddr3.ddr3_part));
792 aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
793
794 if (s->sm_ddr3.ddr3_mod_type ==
795 SPDMEM_DDR3_TYPE_MINI_RDIMM ||
796 s->sm_ddr3.ddr3_mod_type == SPDMEM_DDR3_TYPE_RDIMM)
797 aprint_normal(" (registered)");
798 aprint_normal(", %sECC, %stemp-sensor, ",
799 (s->sm_ddr3.ddr3_hasECC)?"":"no ",
800 (s->sm_ddr3.ddr3_has_therm_sensor)?"":"no ");
801
802 /*
803 * DDR3 size specification is quite different from others
804 *
805 * Module capacity is defined as
806 * Chip_Capacity_in_bits / 8bits-per-byte *
807 * external_bus_width / internal_bus_width
808 * We further divide by 2**20 to get our answer in MB
809 */
810 dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
811 (s->sm_ddr3.ddr3_datawidth + 3) -
812 (s->sm_ddr3.ddr3_chipwidth + 2);
813 dimm_size = (1 << dimm_size) * (s->sm_ddr3.ddr3_physbanks + 1);
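	/*
	 * For example, 1Gb chips (ddr3_chipsize = 2), a 64-bit bus
	 * (ddr3_datawidth = 3), x8 devices (ddr3_chipwidth = 1) and a
	 * single physical bank (ddr3_physbanks = 0) give
	 * (2 + 28 - 20) - 3 + (3 + 3) - (1 + 2) = 10, i.e. 1024MB.
	 */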
814
815 cycle_time = __DDR3_VALUE_PICO(s, tCKmin);
816 bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
817 decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, FALSE,
818 "PC3", 0);
819
820 aprint_verbose_dev(self,
821 "%d rows, %d cols, %d log. banks, %d phys. banks, "
822 "%d.%03dns cycle time\n",
823 s->sm_ddr3.ddr3_rows + 12, s->sm_ddr3.ddr3_cols + 9,
824 1 << (s->sm_ddr3.ddr3_logbanks + 3),
825 s->sm_ddr3.ddr3_physbanks + 1,
826 cycle_time/1000, cycle_time % 1000);
827
828 #define __DDR3_CYCLES(val) \
829 ((val / cycle_time) + ((val % cycle_time) ? 1 : 0))
830
831 aprint_verbose_dev(self, LATENCY,
832 __DDR3_CYCLES(__DDR3_VALUE_PICO(s, tAAmin)),
833 __DDR3_CYCLES(__DDR3_VALUE_PICO(s, tRCDmin)),
834 __DDR3_CYCLES(__DDR3_VALUE_PICO(s, tRPmin)),
835 __DDR3_CYCLES((s->sm_ddr3.ddr3_tRAS_msb * 256
836 + s->sm_ddr3.ddr3_tRAS_lsb) * s->sm_ddr3.ddr3_mtb_dividend
837 / s->sm_ddr3.ddr3_mtb_divisor * 1000));
838
839 #undef __DDR3_CYCLES
840
841 /* For DDR3, Voltage is written in another area */
842 if (!s->sm_ddr3.ddr3_NOT15V || s->sm_ddr3.ddr3_135V
843 || s->sm_ddr3.ddr3_125V) {
844 aprint_verbose("%s:", device_xname(self));
845 if (!s->sm_ddr3.ddr3_NOT15V)
846 aprint_verbose(" 1.5V");
847 if (s->sm_ddr3.ddr3_135V)
848 aprint_verbose(" 1.35V");
849 if (s->sm_ddr3.ddr3_125V)
850 aprint_verbose(" 1.25V");
851 aprint_verbose(" operable\n");
852 }
853 }
854
855 static void
856 decode_fbdimm(const struct sysctlnode *node, device_t self, struct spdmem *s)
857 {
858 int dimm_size, cycle_time, bits;
859
860 aprint_naive("\n");
861 aprint_normal("\n");
862 aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
863
864 /*
865 * FB-DIMM module size calculation is very much like DDR3
866 */
867 dimm_size = s->sm_fbd.fbdimm_rows + 12 +
868 s->sm_fbd.fbdimm_cols + 9 - 20 - 3;
869 dimm_size = (1 << dimm_size) * (1 << (s->sm_fbd.fbdimm_banks + 2));
870
871 cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
872 (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
873 s->sm_fbd.fbdimm_mtb_divisor;
874 bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
875 decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
876 "PC2", 0);
877
878 aprint_verbose_dev(self,
879 "%d rows, %d cols, %d banks, %d.%02dns cycle time\n",
880 s->sm_fbd.fbdimm_rows, s->sm_fbd.fbdimm_cols,
881 1 << (s->sm_fbd.fbdimm_banks + 2),
882 cycle_time / 1000, (cycle_time % 1000 + 5) /10 );
883
884 #define __FBDIMM_CYCLES(field) (s->sm_fbd.field / s->sm_fbd.fbdimm_tCKmin)
885
886 aprint_verbose_dev(self, LATENCY, __FBDIMM_CYCLES(fbdimm_tAAmin),
887 __FBDIMM_CYCLES(fbdimm_tRCDmin), __FBDIMM_CYCLES(fbdimm_tRPmin),
888 (s->sm_fbd.fbdimm_tRAS_msb * 256 + s->sm_fbd.fbdimm_tRAS_lsb) /
889 s->sm_fbd.fbdimm_tCKmin);
890
891 #undef __FBDIMM_CYCLES
892
893 decode_voltage_refresh(self, s);
894 }
895
896 static void
897 decode_ddr4(const struct sysctlnode *node, device_t self, struct spdmem *s)
898 {
899 int dimm_size, cycle_time, ranks;
900 int tAA_clocks, tRCD_clocks, tRP_clocks, tRAS_clocks;
901
902 aprint_naive("\n");
903 print_part(s->sm_ddr4.ddr4_part_number,
904 sizeof(s->sm_ddr4.ddr4_part_number));
905 aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
906 if (s->sm_ddr4.ddr4_mod_type < __arraycount(spdmem_ddr4_module_types))
907 aprint_normal(" (%s)",
908 spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type]);
909 aprint_normal(", %sECC, %stemp-sensor, ",
910 (s->sm_ddr4.ddr4_bus_width_extension) ? "" : "no ",
911 (s->sm_ddr4.ddr4_has_therm_sensor) ? "" : "no ");
912
913 /*
914 * DDR4 size calculation from JEDEC spec
915 *
916 * Module capacity in bytes is defined as
917 * Chip_Capacity_in_bits / 8bits-per-byte *
918 * primary_bus_width / DRAM_width *
919 * logical_ranks_per_DIMM
920 *
921 * logical_ranks_per DIMM equals package_ranks, but multiply
922 * by diecount for 3DS packages
923 *
924 * We further divide by 2**20 to get our answer in MB
925 */
926 dimm_size = (s->sm_ddr4.ddr4_capacity + 28) /* chip_capacity */
927 - 20 /* convert to MB */
928 - 3 /* bits --> bytes */
929 + (s->sm_ddr4.ddr4_primary_bus_width + 3); /* bus width */
930 	switch (s->sm_ddr4.ddr4_device_width) {	/* DRAM width */
931 	case 0:	dimm_size -= 2;		/* x4 devices */
932 		break;
933 	case 1:	dimm_size -= 3;		/* x8 devices */
934 		break;
935 	case 2:	dimm_size -= 4;		/* x16 devices */
936 		break;
937 	case 3:	dimm_size -= 5;		/* x32 devices */
938 		break;
939 default:
940 dimm_size = -1; /* flag invalid value */
941 }
942 if (dimm_size >= 0) {
943 dimm_size = (1 << dimm_size) *
944 (s->sm_ddr4.ddr4_package_ranks + 1); /* log.ranks/DIMM */
945 if (s->sm_ddr4.ddr4_signal_loading == 2) {
946 dimm_size *= (s->sm_ddr4.ddr4_diecount + 1);
947 }
948 }
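	/*
	 * Example: 8Gb chips (ddr4_capacity = 5), a 64-bit primary bus
	 * (ddr4_primary_bus_width = 3), x8 devices (ddr4_device_width = 1)
	 * and two package ranks (ddr4_package_ranks = 1), non-3DS:
	 * (5 + 28) - 20 - 3 + (3 + 3) - 3 = 13, so (1 << 13) * 2 = 16384MB.
	 */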
949
950 /*
951 * Note that the ddr4_xxx_ftb fields are actually signed offsets from
952 * the corresponding mtb value, so we might have to subtract 256!
953 */
954 #define __DDR4_VALUE(field) ((s->sm_ddr4.ddr4_##field##_mtb * 125 + \
955 s->sm_ddr4.ddr4_##field##_ftb) - \
956 ((s->sm_ddr4.ddr4_##field##_ftb > 127)?256:0))
957 /*
958 * For now, the only value for mtb is 0 = 125ps, and ftb = 1ps
959 * so we don't need to figure out the time-base units - just
960 * hard-code them for now.
961 */
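	/*
	 * Example: ddr4_tCKAVGmin_mtb = 6 with ddr4_tCKAVGmin_ftb = 0
	 * gives 6 * 125 + 0 = 750ps, i.e. a DDR4-2666 module.
	 */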
962 cycle_time = __DDR4_VALUE(tCKAVGmin);
963 decode_size_speed(self, node, dimm_size, cycle_time, 2,
964 1 << (s->sm_ddr4.ddr4_primary_bus_width + 3),
965 TRUE, "PC4", 0);
966
967 ranks = s->sm_ddr4.ddr4_package_ranks + 1;
968 aprint_verbose_dev(self,
969 "%d rows, %d cols, %d ranks%s, %d banks/group, %d bank groups\n",
970 s->sm_ddr4.ddr4_rows + 12, s->sm_ddr4.ddr4_cols + 9,
971 ranks, (ranks > 1) ? ((s->sm_ddr4.ddr4_rank_mix == 1)
972 ? " (asymmetric)" : " (symmetric)") : "",
973 1 << (2 + s->sm_ddr4.ddr4_logbanks),
974 1 << s->sm_ddr4.ddr4_bankgroups);
975
976 aprint_verbose_dev(self, "%d.%03dns cycle time\n",
977 cycle_time / 1000, cycle_time % 1000);
978
979 tAA_clocks = __DDR4_VALUE(tAAmin) * 1000 / cycle_time;
980 tRCD_clocks = __DDR4_VALUE(tRCDmin) * 1000 / cycle_time;
981 tRP_clocks = __DDR4_VALUE(tRPmin) * 1000 / cycle_time;
982 tRAS_clocks = (s->sm_ddr4.ddr4_tRASmin_msb * 256 +
983 s->sm_ddr4.ddr4_tRASmin_lsb) * 125 * 1000 / cycle_time;
984
985 /*
986 * Per JEDEC spec, rounding is done by taking the time value, dividing
987 * by the cycle time, subtracting .010 from the result, and then
988 	 * rounding up to the nearest integer. Unfortunately, none of their
989 * examples say what to do when the result of the subtraction is already
990 * an integer. For now, assume that we still round up (so an interval
991 * of exactly 12.010 clock cycles will be printed as 13).
992 */
993 #define __DDR4_ROUND(value) ((value - 10) / 1000 + 1)
994
995 aprint_verbose_dev(self, LATENCY, __DDR4_ROUND(tAA_clocks),
996 __DDR4_ROUND(tRCD_clocks),
997 __DDR4_ROUND(tRP_clocks),
998 __DDR4_ROUND(tRAS_clocks));
999
1000 #undef __DDR4_VALUE
1001 #undef __DDR4_ROUND
1002 }
1003