/* $NetBSD: spdmem.c,v 1.23 2017/01/11 21:44:50 maya Exp $ */

/*
 * Copyright (c) 2007 Nicolas Joly
 * Copyright (c) 2007 Paul Goyette
 * Copyright (c) 2007 Tobias Nygren
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Serial Presence Detect (SPD) memory identification
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spdmem.c,v 1.23 2017/01/11 21:44:50 maya Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <machine/bswap.h>

#include <dev/i2c/i2cvar.h>
#include <dev/ic/spdmemreg.h>
#include <dev/ic/spdmemvar.h>

/* Routines for decoding spd data */
static void decode_edofpm(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_rom(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_sdram(const struct sysctlnode *, device_t, struct spdmem *,
    int);
static void decode_ddr(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr2(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr3(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr4(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_fbdimm(const struct sysctlnode *, device_t, struct spdmem *);

static void decode_size_speed(device_t, const struct sysctlnode *,
    int, int, int, int, bool, const char *, int);
static void decode_voltage_refresh(device_t, struct spdmem *);

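/*
 * Rambus modules do not store a byte count in byte 0; they keep a small
 * SPD revision code there instead, so any value below 4 is treated as a
 * Rambus-style module (see the revision decoding in spdmem_common_attach()).
 */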
#define IS_RAMBUS_TYPE (s->sm_len < 4)

static const char* const spdmem_basic_types[] = {
	"unknown",
	"FPM",
	"EDO",
	"Pipelined Nibble",
	"SDRAM",
	"ROM",
	"DDR SGRAM",
	"DDR SDRAM",
	"DDR2 SDRAM",
	"DDR2 SDRAM FB",
	"DDR2 SDRAM FB Probe",
	"DDR3 SDRAM",
	"DDR4 SDRAM",
	"unknown",
	"DDR4E SDRAM",
	"LPDDR3 SDRAM",
	"LPDDR4 SDRAM"
};

static const char* const spdmem_ddr4_module_types[] = {
	"DDR4 Extended",
	"DDR4 RDIMM",
	"DDR4 UDIMM",
	"DDR4 SO-DIMM",
	"DDR4 Load-Reduced DIMM",
	"DDR4 Mini-RDIMM",
	"DDR4 Mini-UDIMM",
	"DDR4 Reserved",
	"DDR4 72Bit SO-RDIMM",
	"DDR4 72Bit SO-UDIMM",
	"DDR4 Undefined",
	"DDR4 Reserved",
	"DDR4 16Bit SO-DIMM",
	"DDR4 32Bit SO-DIMM",
	"DDR4 Reserved",
	"DDR4 Undefined"
};

static const char* const spdmem_superset_types[] = {
	"unknown",
	"ESDRAM",
	"DDR ESDRAM",
	"PEM EDO",
	"PEM SDRAM"
};

static const char* const spdmem_voltage_types[] = {
	"TTL (5V tolerant)",
	"LvTTL (not 5V tolerant)",
	"HSTL 1.5V",
	"SSTL 3.3V",
	"SSTL 2.5V",
	"SSTL 1.8V"
};

static const char* const spdmem_refresh_types[] = {
	"15.625us",
	"3.9us",
	"7.8us",
	"31.3us",
	"62.5us",
	"125us"
};

static const char* const spdmem_parity_types[] = {
	"no parity or ECC",
	"data parity",
	"data ECC",
	"data parity and ECC",
	"cmd/addr parity",
	"cmd/addr/data parity",
	"cmd/addr parity, data ECC",
	"cmd/addr/data parity, data ECC"
};

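/* DDR4 SPD sizes in bytes, indexed by the "bytes used" nibble of byte 0 */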
int spd_rom_sizes[] = { 0, 128, 256, 384, 512 };


/* Cycle time fractional values (units of .001 ns) for DDR2 SDRAM */
static const uint16_t spdmem_cycle_frac[] = {
	0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
	250, 333, 667, 750, 999, 999
};

/* Format string for timing info */
#define LATENCY "tAA-tRCD-tRP-tRAS: %d-%d-%d-%d\n"

/* CRC functions used for certain memory types */

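/*
 * JEDEC SPD CRC for DDR3/DDR4: CRC-16 with polynomial 0x1021
 * (CRC-16/XMODEM), initial value 0, computed over bytes 0..count
 * inclusive; the result is compared with the value stored in SPD
 * bytes 126 (LSB) and 127 (MSB).
 */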
static uint16_t
spdcrc16(struct spdmem_softc *sc, int count)
{
	uint16_t crc;
	int i, j;
	uint8_t val;
	crc = 0;
	for (j = 0; j <= count; j++) {
		(sc->sc_read)(sc, j, &val);
		crc = crc ^ val << 8;
		for (i = 0; i < 8; ++i)
			if (crc & 0x8000)
				crc = crc << 1 ^ 0x1021;
			else
				crc = crc << 1;
	}
	return (crc & 0xFFFF);
}

int
spdmem_common_probe(struct spdmem_softc *sc)
{
	int cksum = 0;
	uint8_t i, val, spd_type;
	int spd_len, spd_crc_cover;
	uint16_t crc_calc, crc_spd;

	/* Read failed means a device doesn't exist */
	if ((sc->sc_read)(sc, 2, &spd_type) != 0)
		return 0;

	/* Memory type should not be 0 */
	if (spd_type == 0x00)
		return 0;

	/* For older memory types, validate the checksum over 1st 63 bytes */
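	/*
	 * The checksum is the low 8 bits of the sum of bytes 0-62,
	 * stored in byte 63.
	 */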
	if (spd_type <= SPDMEM_MEMTYPE_DDR2SDRAM) {
		for (i = 0; i < 63; i++) {
			(sc->sc_read)(sc, i, &val);
			cksum += val;
		}

		(sc->sc_read)(sc, 63, &val);

		if ((cksum & 0xff) != val) {
			aprint_debug("spd checksum failed, calc = 0x%02x, "
			    "spd = 0x%02x\n", cksum, val);
			return 0;
		} else
			return 1;
	}

	/* For DDR3 and FBDIMM, verify the CRC */
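	/*
	 * Byte 0 encodes both the CRC coverage (bytes 0-116 or 0-125)
	 * and the announced SPD length; the CRC itself is stored
	 * little-endian in bytes 126/127.
	 */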
	else if (spd_type <= SPDMEM_MEMTYPE_DDR3SDRAM) {
		(sc->sc_read)(sc, 0, &val);
		spd_len = val;
		if (spd_len & SPDMEM_SPDCRC_116)
			spd_crc_cover = 116;
		else
			spd_crc_cover = 125;
		switch (spd_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			return 0;
		}
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		(sc->sc_read)(sc, 127, &val);
		crc_spd = val << 8;
		(sc->sc_read)(sc, 126, &val);
		crc_spd |= val;
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
			    "calc = 0x%04x, spd = 0x%04x\n",
			    spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		return 1;
	} else if (spd_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		(sc->sc_read)(sc, 0, &val);
		spd_len = val & 0x0f;
		if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
			return 0;
		spd_len = spd_rom_sizes[spd_len];
		spd_crc_cover = 125;	/* For byte 0 to 125 */
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		(sc->sc_read)(sc, 127, &val);
		crc_spd = val << 8;
		(sc->sc_read)(sc, 126, &val);
		crc_spd |= val;
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
			    "calc = 0x%04x, spd = 0x%04x\n",
			    spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		/*
		 * We probably could also verify the CRC for the other
		 * "pages" of SPD data in blocks 1 and 2, but we'll do
		 * it some other time.
		 */
		return 1;
	}

	/* For unrecognized memory types, don't match at all */
	return 0;
}

void
spdmem_common_attach(struct spdmem_softc *sc, device_t self)
{
	struct spdmem *s = &(sc->sc_spd_data);
	const char *type;
	const char *rambus_rev = "Reserved";
	int dimm_size;
	unsigned int i, spd_len, spd_size;
	const struct sysctlnode *node = NULL;

	(sc->sc_read)(sc, 0, &s->sm_len);
	(sc->sc_read)(sc, 1, &s->sm_size);
	(sc->sc_read)(sc, 2, &s->sm_type);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * An even newer encoding with one byte holding both
		 * the used-size and capacity values
		 */
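		/*
		 * Bits 3:0 of byte 0 index spd_rom_sizes[] for the number
		 * of bytes used; bits 6:4 are scaled to give the total
		 * EEPROM size.
		 */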
		spd_len = s->sm_len & 0x0f;
		spd_size = (s->sm_len >> 4) & 0x07;

		spd_len = spd_rom_sizes[spd_len];
		spd_size *= 512;

	} else if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
		/*
		 * FBDIMM and DDR3 (and probably all newer) have a different
		 * encoding of the SPD EEPROM used/total sizes
		 */
		spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
		switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			spd_len = 64;
			break;
		}
	} else {
		spd_size = 1 << s->sm_size;
		spd_len = s->sm_len;
		if (spd_len < 64)
			spd_len = 64;
	}
	if (spd_len > spd_size)
		spd_len = spd_size;
	if (spd_len > sizeof(struct spdmem))
		spd_len = sizeof(struct spdmem);
	for (i = 3; i < spd_len; i++)
		(sc->sc_read)(sc, i, &((uint8_t *)s)[i]);

	/*
	 * Setup our sysctl subtree, hw.spdmemN
	 */
	sc->sc_sysctl_log = NULL;
	sysctl_createv(&sc->sc_sysctl_log, 0, NULL, &node,
	    0, CTLTYPE_NODE,
	    device_xname(self), NULL, NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL);
	if (node != NULL && spd_len != 0)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRUCT, "spd_data",
		    SYSCTL_DESCR("raw spd data"), NULL,
		    0, s, spd_len,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	/*
	 * Decode and print key SPD contents
	 */
	if (IS_RAMBUS_TYPE) {
		if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
			type = "Rambus";
		else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
			type = "Direct Rambus";
		else
			type = "Rambus (unknown)";

		switch (s->sm_len) {
		case 0:
			rambus_rev = "Invalid";
			break;
		case 1:
			rambus_rev = "0.7";
			break;
		case 2:
			rambus_rev = "1.0";
			break;
		default:
			rambus_rev = "Reserved";
			break;
		}
	} else {
		if (s->sm_type < __arraycount(spdmem_basic_types))
			type = spdmem_basic_types[s->sm_type];
		else
			type = "unknown memory type";

		if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
		    s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
		    s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
			type =
			    spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
			type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
		}
		if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM &&
		    s->sm_ddr4.ddr4_mod_type <
		    __arraycount(spdmem_ddr4_module_types)) {
			type = spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type];
		}
	}

	strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * The latest spec (DDR4 SPD Document Release 3) defines
		 * NVDIMM Hybrid only.
		 */
		if ((s->sm_ddr4.ddr4_hybrid)
		    && (s->sm_ddr4.ddr4_hybrid_media == 1))
			strlcat(sc->sc_type, " NVDIMM hybrid",
			    SPDMEM_TYPE_MAXLEN);
	}

	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRING, "mem_type",
		    SYSCTL_DESCR("memory module type"), NULL,
		    0, sc->sc_type, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (IS_RAMBUS_TYPE) {
		aprint_naive("\n");
		aprint_normal("\n");
		aprint_normal_dev(self, "%s, SPD Revision %s", type, rambus_rev);
		dimm_size = 1 << (s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13);
		if (dimm_size >= 1024)
			aprint_normal(", %dGB\n", dimm_size / 1024);
		else
			aprint_normal(", %dMB\n", dimm_size);

		/* No further decode for RAMBUS memory */
		return;
	}
	switch (s->sm_type) {
	case SPDMEM_MEMTYPE_EDO:
	case SPDMEM_MEMTYPE_FPM:
		decode_edofpm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_ROM:
		decode_rom(node, self, s);
		break;
	case SPDMEM_MEMTYPE_SDRAM:
		decode_sdram(node, self, s, spd_len);
		break;
	case SPDMEM_MEMTYPE_DDRSDRAM:
		decode_ddr(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR2SDRAM:
		decode_ddr2(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR3SDRAM:
		decode_ddr3(node, self, s);
		break;
	case SPDMEM_MEMTYPE_FBDIMM:
	case SPDMEM_MEMTYPE_FBDIMM_PROBE:
		decode_fbdimm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR4SDRAM:
		decode_ddr4(node, self, s);
		break;
	}

	/* Dump SPD */
	for (i = 0; i < spd_len; i += 16) {
		unsigned int j, k;
		aprint_debug_dev(self, "0x%02x:", i);
		k = (spd_len > (i + 16)) ? i + 16 : spd_len;
		for (j = i; j < k; j++)
			aprint_debug(" %02x", ((uint8_t *)s)[j]);
		aprint_debug("\n");
	}
}

int
spdmem_common_detach(struct spdmem_softc *sc, device_t self)
{
	sysctl_teardown(&sc->sc_sysctl_log);

	return 0;
}

static void
decode_size_speed(device_t self, const struct sysctlnode *node,
    int dimm_size, int cycle_time, int d_clk, int bits,
    bool round, const char *ddr_type_string, int speed)
{
	int p_clk;
	struct spdmem_softc *sc = device_private(self);

	if (dimm_size < 1024)
		aprint_normal("%dMB", dimm_size);
	else
		aprint_normal("%dGB", dimm_size / 1024);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "size",
		    SYSCTL_DESCR("module size in MB"), NULL,
		    dimm_size, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (cycle_time == 0) {
		aprint_normal("\n");
		return;
	}

	/*
	 * Calculate p_clk first, since for DDR3 we need maximum significance.
	 * DDR3 rating is not rounded to a multiple of 100.  This results in
	 * cycle_time of 1.5ns displayed as PC3-10666.
	 *
	 * For SDRAM, the speed is provided by the caller so we use it.
	 */
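	/*
	 * Worked example (DDR3, 64 data bits): cycle_time 1500 (1.5 ns in
	 * units of .001 ns) gives d_clk = 2000000 / 1500 = 1333 MHz and
	 * p_clk = 2000000 * 64 / 8 / 1500 = 10666, i.e. PC3-10666.
	 */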
	d_clk *= 1000 * 1000;
	if (speed)
		p_clk = speed;
	else
		p_clk = (d_clk * bits) / 8 / cycle_time;
	d_clk = (d_clk + cycle_time / 2) / cycle_time;
	if (round) {
		if ((p_clk % 100) >= 50)
			p_clk += 50;
		p_clk -= p_clk % 100;
	}
	aprint_normal(", %dMHz (%s-%d)\n",
	    d_clk, ddr_type_string, p_clk);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "speed",
		    SYSCTL_DESCR("memory speed in MHz"),
		    NULL, d_clk, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
}

static void
decode_voltage_refresh(device_t self, struct spdmem *s)
{
	const char *voltage, *refresh;

	if (s->sm_voltage < __arraycount(spdmem_voltage_types))
		voltage = spdmem_voltage_types[s->sm_voltage];
	else
		voltage = "unknown";

	if (s->sm_refresh < __arraycount(spdmem_refresh_types))
		refresh = spdmem_refresh_types[s->sm_refresh];
	else
		refresh = "unknown";

	aprint_verbose_dev(self, "voltage %s, refresh time %s%s\n",
	    voltage, refresh,
	    s->sm_selfrefresh?" (self-refreshing)":"");
}

static void
decode_edofpm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
	    s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols, s->sm_fpm.fpm_banks,
	    s->sm_fpm.fpm_tRAC, s->sm_fpm.fpm_tCAC);
}

static void
decode_rom(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self, "%d rows, %d cols, %d banks\n",
	    s->sm_rom.rom_rows, s->sm_rom.rom_cols, s->sm_rom.rom_banks);
}

static void
decode_sdram(const struct sysctlnode *node, device_t self, struct spdmem *s,
    int spd_len)
{
	int dimm_size, cycle_time, bits, tAA, i, speed, freq;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_sdr.sdr_mod_attrs & SPDMEM_SDR_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17);
	dimm_size *= s->sm_sdr.sdr_banks * s->sm_sdr.sdr_banks_per_chip;

	cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
	    s->sm_sdr.sdr_cycle_tenths * 100;
	bits = le16toh(s->sm_sdr.sdr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;

	/* Calculate speed here - from OpenBSD */
	if (spd_len >= 128)
		freq = ((uint8_t *)s)[126];
	else
		freq = 0;
	switch (freq) {
	/*
	 * Must check cycle time since some PC-133 DIMMs
	 * actually report PC-100
	 */
	case 100:
	case 133:
		if (cycle_time < 8000)
			speed = 133;
		else
			speed = 100;
		break;
	case 0x66:		/* Legacy DIMMs use _hex_ 66! */
	default:
		speed = 66;
	}
	decode_size_speed(self, node, dimm_size, cycle_time, 1, bits, FALSE,
	    "PC", speed);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols, s->sm_sdr.sdr_banks,
	    s->sm_sdr.sdr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000) / 100);

	tAA = 0;
	for (i = 0; i < 8; i++)
		if (s->sm_sdr.sdr_tCAS & (1 << i))
			tAA = i;
	tAA++;
	aprint_verbose_dev(self, LATENCY, tAA, s->sm_sdr.sdr_tRCD,
	    s->sm_sdr.sdr_tRP, s->sm_sdr.sdr_tRAS);

	decode_voltage_refresh(self, s);
}

static void
decode_ddr(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_ddr.ddr_mod_attrs & SPDMEM_DDR_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17);
	dimm_size *= s->sm_ddr.ddr_ranks * s->sm_ddr.ddr_banks_per_chip;

	cycle_time = s->sm_ddr.ddr_cycle_whole * 1000 +
	    spdmem_cycle_frac[s->sm_ddr.ddr_cycle_tenths];
	bits = le16toh(s->sm_ddr.ddr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols, s->sm_ddr.ddr_ranks,
	    s->sm_ddr.ddr_banks_per_chip, cycle_time/1000,
	    (cycle_time % 1000 + 50) / 100);

	tAA = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr.ddr_tCAS & (1 << i))
			tAA = i;
	tAA /= 2;

#define __DDR_ROUND(scale, field) \
	((scale * s->sm_ddr.field + cycle_time - 1) / cycle_time)
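/*
 * tRCD and tRP are stored in units of 0.25 ns and tRAS in whole ns,
 * hence the scale factors of 250 and 1000 that convert them to the
 * same .001 ns units as cycle_time before rounding up to clock cycles.
 */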

	aprint_verbose_dev(self, LATENCY, tAA, __DDR_ROUND(250, ddr_tRCD),
	    __DDR_ROUND(250, ddr_tRP), __DDR_ROUND(1000, ddr_tRAS));

#undef __DDR_ROUND

	decode_voltage_refresh(self, s);
}

static void
decode_ddr2(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_ddr2.ddr2_mod_attrs & SPDMEM_DDR2_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17);
	dimm_size *= (s->sm_ddr2.ddr2_ranks + 1) *
	    s->sm_ddr2.ddr2_banks_per_chip;

	cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 +
	    spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac];
	bits = s->sm_ddr2.ddr2_datawidth;
	if ((s->sm_config & 0x03) != 0)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%02dns cycle time\n",
	    s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols,
	    s->sm_ddr2.ddr2_ranks + 1, s->sm_ddr2.ddr2_banks_per_chip,
	    cycle_time / 1000, (cycle_time % 1000 + 5) / 10);

	tAA = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr2.ddr2_tCAS & (1 << i))
			tAA = i;

#define __DDR2_ROUND(scale, field) \
	((scale * s->sm_ddr2.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR2_ROUND(250, ddr2_tRCD),
	    __DDR2_ROUND(250, ddr2_tRP), __DDR2_ROUND(1000, ddr2_tRAS));

#undef __DDR2_ROUND

	decode_voltage_refresh(self, s);
}

static void
decode_ddr3(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal(": %18s\n", s->sm_ddr3.ddr3_part);
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	if (s->sm_ddr3.ddr3_mod_type ==
	    SPDMEM_DDR3_TYPE_MINI_RDIMM ||
	    s->sm_ddr3.ddr3_mod_type == SPDMEM_DDR3_TYPE_RDIMM)
		aprint_normal(" (registered)");
	aprint_normal(", %sECC, %stemp-sensor, ",
	    (s->sm_ddr3.ddr3_hasECC)?"":"no ",
	    (s->sm_ddr3.ddr3_has_therm_sensor)?"":"no ");

	/*
	 * DDR3 size specification is quite different from others
	 *
	 * Module capacity is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	external_bus_width / internal_bus_width
	 * We further divide by 2**20 to get our answer in MB
	 */
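	/*
	 * Worked example: 2 Gbit chips (ddr3_chipsize 3), x8 devices
	 * (ddr3_chipwidth 1), 64-bit bus (ddr3_datawidth 3), 2 ranks
	 * (ddr3_physbanks 1): 2^(3+28-20-3+6-3) * 2 = 4096 MB.
	 */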
	dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
	    (s->sm_ddr3.ddr3_datawidth + 3) -
	    (s->sm_ddr3.ddr3_chipwidth + 2);
	dimm_size = (1 << dimm_size) * (s->sm_ddr3.ddr3_physbanks + 1);

	cycle_time = (1000 * s->sm_ddr3.ddr3_mtb_dividend +
	    (s->sm_ddr3.ddr3_mtb_divisor / 2)) /
	    s->sm_ddr3.ddr3_mtb_divisor;
	cycle_time *= s->sm_ddr3.ddr3_tCKmin;
	bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, FALSE,
	    "PC3", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d log. banks, %d phys. banks, "
	    "%d.%03dns cycle time\n",
	    s->sm_ddr3.ddr3_rows + 9, s->sm_ddr3.ddr3_cols + 12,
	    1 << (s->sm_ddr3.ddr3_logbanks + 3),
	    s->sm_ddr3.ddr3_physbanks + 1,
	    cycle_time/1000, cycle_time % 1000);

#define __DDR3_CYCLES(field) (s->sm_ddr3.field / s->sm_ddr3.ddr3_tCKmin)

	aprint_verbose_dev(self, LATENCY, __DDR3_CYCLES(ddr3_tAAmin),
	    __DDR3_CYCLES(ddr3_tRCDmin), __DDR3_CYCLES(ddr3_tRPmin),
	    (s->sm_ddr3.ddr3_tRAS_msb * 256 + s->sm_ddr3.ddr3_tRAS_lsb) /
	    s->sm_ddr3.ddr3_tCKmin);

#undef __DDR3_CYCLES

	/* For DDR3, Voltage is written in another area */
	if (!s->sm_ddr3.ddr3_NOT15V || s->sm_ddr3.ddr3_135V
	    || s->sm_ddr3.ddr3_125V) {
		aprint_verbose("%s:", device_xname(self));
		if (!s->sm_ddr3.ddr3_NOT15V)
			aprint_verbose(" 1.5V");
		if (s->sm_ddr3.ddr3_135V)
			aprint_verbose(" 1.35V");
		if (s->sm_ddr3.ddr3_125V)
			aprint_verbose(" 1.25V");
		aprint_verbose(" operable\n");
	}
}

static void
decode_fbdimm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	/*
	 * FB-DIMM module size calculation is very much like DDR3
	 */
	dimm_size = s->sm_fbd.fbdimm_rows + 12 +
	    s->sm_fbd.fbdimm_cols + 9 - 20 - 3;
	dimm_size = (1 << dimm_size) * (1 << (s->sm_fbd.fbdimm_banks + 2));

	cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
	    (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
	    s->sm_fbd.fbdimm_mtb_divisor;
	bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d.%02dns cycle time\n",
	    s->sm_fbd.fbdimm_rows, s->sm_fbd.fbdimm_cols,
	    1 << (s->sm_fbd.fbdimm_banks + 2),
	    cycle_time / 1000, (cycle_time % 1000 + 5) / 10);

#define __FBDIMM_CYCLES(field) (s->sm_fbd.field / s->sm_fbd.fbdimm_tCKmin)

	aprint_verbose_dev(self, LATENCY, __FBDIMM_CYCLES(fbdimm_tAAmin),
	    __FBDIMM_CYCLES(fbdimm_tRCDmin), __FBDIMM_CYCLES(fbdimm_tRPmin),
	    (s->sm_fbd.fbdimm_tRAS_msb * 256 + s->sm_fbd.fbdimm_tRAS_lsb) /
	    s->sm_fbd.fbdimm_tCKmin);

#undef __FBDIMM_CYCLES

	decode_voltage_refresh(self, s);
}

static void
decode_ddr4(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time;
	int tAA_clocks, tRCD_clocks, tRP_clocks, tRAS_clocks;

	aprint_naive("\n");
	aprint_normal(": %20s\n", s->sm_ddr4.ddr4_part_number);
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
	if (s->sm_ddr4.ddr4_mod_type < __arraycount(spdmem_ddr4_module_types))
		aprint_normal(" (%s)",
		    spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type]);
	aprint_normal(", %stemp-sensor, ",
	    (s->sm_ddr4.ddr4_has_therm_sensor)?"":"no ");

	/*
	 * DDR4 size calculation from JEDEC spec
	 *
	 * Module capacity in bytes is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	primary_bus_width / DRAM_width *
	 *	logical_ranks_per_DIMM
	 *
	 * logical_ranks_per_DIMM equals package_ranks, but multiply
	 * by diecount for 3DS packages
	 *
	 * We further divide by 2**20 to get our answer in MB
	 */
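	/*
	 * Worked example: 8 Gbit chips (ddr4_capacity 5), x8 devices,
	 * 64-bit bus (ddr4_primary_bus_width 3), one package rank,
	 * monolithic: 2^(5+28-20-3+6-3) * 1 = 8192 MB.
	 */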
	dimm_size = (s->sm_ddr4.ddr4_capacity + 28) /* chip_capacity */
	    - 20 /* convert to MB */
	    - 3	 /* bits --> bytes */
	    + (s->sm_ddr4.ddr4_primary_bus_width + 3); /* bus width */
	switch (s->sm_ddr4.ddr4_device_width) {	/* DRAM width */
	case 0:	dimm_size -= 2;
		break;
	case 1:	dimm_size -= 3;
		break;
	case 2:	dimm_size -= 4;
		break;
	case 3:	dimm_size -= 5;
		break;
	default:
		dimm_size = -1;	/* flag invalid value */
	}
	if (dimm_size >= 0) {
		dimm_size = (1 << dimm_size) *
		    (s->sm_ddr4.ddr4_package_ranks + 1); /* log.ranks/DIMM */
		if (s->sm_ddr4.ddr4_signal_loading == 2) {
			dimm_size *= (s->sm_ddr4.ddr4_diecount + 1);
		}
	}

#define __DDR4_VALUE(field) ((s->sm_ddr4.ddr4_##field##_mtb * 125 + \
	s->sm_ddr4.ddr4_##field##_ftb) - \
	((s->sm_ddr4.ddr4_##field##_ftb > 127)?256:0))
	/*
	 * For now, the only defined time bases are mtb = 125 ps and
	 * ftb = 1 ps, so we don't need to figure out the time-base
	 * units - just hard-code them for now.
	 */
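	/*
	 * Example: a tCKAVGmin of 0.833 ns (DDR4-2400) can be encoded
	 * as mtb 7 with ftb offset -42 (stored as 214):
	 * 7 * 125 + 214 - 256 = 833.
	 */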
	cycle_time = __DDR4_VALUE(tCKAVGmin);
	decode_size_speed(self, node, dimm_size, cycle_time, 2,
	    1 << (s->sm_ddr4.ddr4_primary_bus_width + 3),
	    TRUE, "PC4", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d bank groups, "
	    "%d.%03dns cycle time\n",
	    s->sm_ddr4.ddr4_rows + 9, s->sm_ddr4.ddr4_cols + 12,
	    1 << (2 + s->sm_ddr4.ddr4_logbanks),
	    1 << s->sm_ddr4.ddr4_bankgroups,
	    cycle_time / 1000, cycle_time % 1000);

	/*
	 * Note that the ddr4_xxx_ftb fields are actually signed offsets from
	 * the corresponding mtb value, so we might have to subtract 256!
	 */

	tAA_clocks = __DDR4_VALUE(tAAmin) * 1000 / cycle_time;
	tRCD_clocks = __DDR4_VALUE(tRCDmin) * 1000 / cycle_time;
	tRP_clocks = __DDR4_VALUE(tRPmin) * 1000 / cycle_time;
	tRAS_clocks = (s->sm_ddr4.ddr4_tRASmin_msb * 256 +
	    s->sm_ddr4.ddr4_tRASmin_lsb) * 125 * 1000 / cycle_time;

	/*
	 * Per the JEDEC spec, rounding is done by taking the time value,
	 * dividing by the cycle time, subtracting .010 from the result, and
	 * then rounding up to the nearest integer.  Unfortunately, none of
	 * the spec's examples say what to do when the result of the
	 * subtraction is already an integer.  For now, assume that we still
	 * round up (so an interval of exactly 12.010 clock cycles will be
	 * printed as 13).
	 */
#define __DDR4_ROUND(value) ((value - 10) / 1000 + 1)
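/* e.g. 13320 (13.32 clocks) yields (13320 - 10) / 1000 + 1 = 14 */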

	aprint_verbose_dev(self, LATENCY, __DDR4_ROUND(tAA_clocks),
	    __DDR4_ROUND(tRCD_clocks),
	    __DDR4_ROUND(tRP_clocks),
	    __DDR4_ROUND(tRAS_clocks));

#undef __DDR4_VALUE
#undef __DDR4_ROUND
}