/*	$NetBSD: spdmem.c,v 1.20 2015/12/24 14:16:18 msaitoh Exp $ */

/*
 * Copyright (c) 2007 Nicolas Joly
 * Copyright (c) 2007 Paul Goyette
 * Copyright (c) 2007 Tobias Nygren
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Serial Presence Detect (SPD) memory identification
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spdmem.c,v 1.20 2015/12/24 14:16:18 msaitoh Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <machine/bswap.h>

#include <dev/i2c/i2cvar.h>
#include <dev/ic/spdmemreg.h>
#include <dev/ic/spdmemvar.h>

/* Routines for decoding spd data */
static void decode_edofpm(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_rom(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_sdram(const struct sysctlnode *, device_t, struct spdmem *,
	int);
static void decode_ddr(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr2(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr3(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_ddr4(const struct sysctlnode *, device_t, struct spdmem *);
static void decode_fbdimm(const struct sysctlnode *, device_t, struct spdmem *);

static void decode_size_speed(device_t, const struct sysctlnode *,
	int, int, int, int, bool, const char *, int);
static void decode_voltage_refresh(device_t, struct spdmem *);

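/*
 * Rambus modules reuse byte 0 as an SPD revision code (always < 4)
 * rather than a data length; see the Rambus branch of
 * spdmem_common_attach() below.
 */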
#define IS_RAMBUS_TYPE (s->sm_len < 4)

static const char* const spdmem_basic_types[] = {
	"unknown",
	"FPM",
	"EDO",
	"Pipelined Nibble",
	"SDRAM",
	"ROM",
	"DDR SGRAM",
	"DDR SDRAM",
	"DDR2 SDRAM",
	"DDR2 SDRAM FB",
	"DDR2 SDRAM FB Probe",
	"DDR3 SDRAM",
	"DDR4 SDRAM",
	"unknown",
	"DDR4E SDRAM",
	"LPDDR3 SDRAM",
	"LPDDR4 SDRAM"
};

static const char* const spdmem_ddr4_module_types[] = {
	"DDR4 Extended",
	"DDR4 RDIMM",
	"DDR4 UDIMM",
	"DDR4 SO-DIMM",
	"DDR4 Load-Reduced DIMM",
	"DDR4 Mini-RDIMM",
	"DDR4 Mini-UDIMM",
	"DDR4 Reserved",
	"DDR4 72Bit SO-RDIMM",
	"DDR4 72Bit SO-UDIMM",
	"DDR4 Undefined",
	"DDR4 Reserved",
	"DDR4 16Bit SO-DIMM",
	"DDR4 32Bit SO-DIMM",
	"DDR4 Reserved",
	"DDR4 Undefined"
};

static const char* const spdmem_superset_types[] = {
	"unknown",
	"ESDRAM",
	"DDR ESDRAM",
	"PEM EDO",
	"PEM SDRAM"
};

static const char* const spdmem_voltage_types[] = {
	"TTL (5V tolerant)",
	"LvTTL (not 5V tolerant)",
	"HSTL 1.5V",
	"SSTL 3.3V",
	"SSTL 2.5V",
	"SSTL 1.8V"
};

static const char* const spdmem_refresh_types[] = {
	"15.625us",
	"3.9us",
	"7.8us",
	"31.3us",
	"62.5us",
	"125us"
};

static const char* const spdmem_parity_types[] = {
	"no parity or ECC",
	"data parity",
	"data ECC",
	"data parity and ECC",
	"cmd/addr parity",
	"cmd/addr/data parity",
	"cmd/addr parity, data ECC",
	"cmd/addr/data parity, data ECC"
};

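/*
 * DDR4 "SPD bytes used" encoding (byte 0, bits 3:0), indexed by that
 * nibble: 0 = undefined, then 128, 256, 384, or 512 bytes.
 */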
int spd_rom_sizes[] = { 0, 128, 256, 384, 512 };


/* Cycle time fractional values (units of .001 ns) for DDR2 SDRAM */
static const uint16_t spdmem_cycle_frac[] = {
	0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
	250, 333, 667, 750, 999, 999
};

/* Format string for timing info */
#define LATENCY	"tAA-tRCD-tRP-tRAS: %d-%d-%d-%d\n"

/* CRC functions used for certain memory types */

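/*
 * CRC-16/CCITT (polynomial 0x1021, initial value 0), computed MSB-first
 * over SPD bytes 0 through 'count' inclusive, as JEDEC specifies for
 * FB-DIMM, DDR3 and DDR4 SPD data.
 */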
static uint16_t spdcrc16(struct spdmem_softc *sc, int count)
{
	uint16_t crc;
	int i, j;
	uint8_t val;
	crc = 0;
	for (j = 0; j <= count; j++) {
		val = (sc->sc_read)(sc, j);
		crc = crc ^ val << 8;
		for (i = 0; i < 8; ++i)
			if (crc & 0x8000)
				crc = crc << 1 ^ 0x1021;
			else
				crc = crc << 1;
	}
	return (crc & 0xFFFF);
}

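/*
 * Decide whether the device really holds SPD data: validate the simple
 * additive checksum for the older (pre-FB-DIMM) module types, or the
 * CRC-16 for FB-DIMM, DDR3 and DDR4.  Returns non-zero if the data
 * looks valid.
 */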
int
spdmem_common_probe(struct spdmem_softc *sc)
{
	int cksum = 0;
	uint8_t i, val, spd_type;
	int spd_len, spd_crc_cover;
	uint16_t crc_calc, crc_spd;

	spd_type = (sc->sc_read)(sc, 2);

	/* For older memory types, validate the checksum over 1st 63 bytes */
	if (spd_type <= SPDMEM_MEMTYPE_DDR2SDRAM) {
		for (i = 0; i < 63; i++)
			cksum += (sc->sc_read)(sc, i);

		val = (sc->sc_read)(sc, 63);

		if (cksum == 0 || (cksum & 0xff) != val) {
			aprint_debug("spd checksum failed, calc = 0x%02x, "
			    "spd = 0x%02x\n", cksum, val);
			return 0;
		} else
			return 1;
	}

	/* For DDR3 and FBDIMM, verify the CRC */
	else if (spd_type <= SPDMEM_MEMTYPE_DDR3SDRAM) {
		spd_len = (sc->sc_read)(sc, 0);
		if (spd_len & SPDMEM_SPDCRC_116)
			spd_crc_cover = 116;
		else
			spd_crc_cover = 125;
		switch (spd_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			return 0;
		}
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		crc_spd = (sc->sc_read)(sc, 127) << 8;
		crc_spd |= (sc->sc_read)(sc, 126);
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
			    "calc = 0x%04x, spd = 0x%04x\n",
			    spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		return 1;
	} else if (spd_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		spd_len = (sc->sc_read)(sc, 0) & 0x0f;
		if ((unsigned int)spd_len >= __arraycount(spd_rom_sizes))
			return 0;
		spd_len = spd_rom_sizes[spd_len];
		spd_crc_cover = 125;	/* For byte 0 to 125 */
		if (spd_crc_cover > spd_len)
			return 0;
		crc_calc = spdcrc16(sc, spd_crc_cover);
		crc_spd = (sc->sc_read)(sc, 127) << 8;
		crc_spd |= (sc->sc_read)(sc, 126);
		if (crc_calc != crc_spd) {
			aprint_debug("crc16 failed, covers %d bytes, "
			    "calc = 0x%04x, spd = 0x%04x\n",
			    spd_crc_cover, crc_calc, crc_spd);
			return 0;
		}
		/*
		 * We probably could also verify the CRC for the other
		 * "pages" of SPD data in blocks 1 and 2, but we'll do
		 * it some other time.
		 */
		return 1;
	} else
		return 0;

	/* For unrecognized memory types, don't match at all */
	return 0;
}

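/*
 * Read the SPD contents into sc_spd_data, expose the raw data and the
 * module type under a hw.<device> sysctl subtree, and print a decoded
 * summary of the module.
 */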
void
spdmem_common_attach(struct spdmem_softc *sc, device_t self)
{
	struct spdmem *s = &(sc->sc_spd_data);
	const char *type;
	const char *rambus_rev = "Reserved";
	int dimm_size;
	unsigned int i, spd_len, spd_size;
	const struct sysctlnode *node = NULL;

	s->sm_len = (sc->sc_read)(sc, 0);
	s->sm_size = (sc->sc_read)(sc, 1);
	s->sm_type = (sc->sc_read)(sc, 2);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * An even newer encoding with one byte holding both
		 * the used-size and capacity values
		 */
		spd_len = s->sm_len & 0x0f;
		spd_size = (s->sm_len >> 4) & 0x07;

		spd_len = spd_rom_sizes[spd_len];
		spd_size *= 512;

	} else if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
		/*
		 * FBDIMM and DDR3 (and probably all newer) have a different
		 * encoding of the SPD EEPROM used/total sizes
		 */
		spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
		switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			spd_len = 64;
			break;
		}
	} else {
		spd_size = 1 << s->sm_size;
		spd_len = s->sm_len;
		if (spd_len < 64)
			spd_len = 64;
	}
	if (spd_len > spd_size)
		spd_len = spd_size;
	if (spd_len > sizeof(struct spdmem))
		spd_len = sizeof(struct spdmem);
	for (i = 3; i < spd_len; i++)
		((uint8_t *)s)[i] = (sc->sc_read)(sc, i);

	/*
	 * Setup our sysctl subtree, hw.spdmemN
	 */
	sc->sc_sysctl_log = NULL;
	sysctl_createv(&sc->sc_sysctl_log, 0, NULL, &node,
	    0, CTLTYPE_NODE,
	    device_xname(self), NULL, NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL);
	if (node != NULL && spd_len != 0)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRUCT, "spd_data",
		    SYSCTL_DESCR("raw spd data"), NULL,
		    0, s, spd_len,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	/*
	 * Decode and print key SPD contents
	 */
	if (IS_RAMBUS_TYPE) {
		if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
			type = "Rambus";
		else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
			type = "Direct Rambus";
		else
			type = "Rambus (unknown)";

		switch (s->sm_len) {
		case 0:
			rambus_rev = "Invalid";
			break;
		case 1:
			rambus_rev = "0.7";
			break;
		case 2:
			rambus_rev = "1.0";
			break;
		default:
			rambus_rev = "Reserved";
			break;
		}
	} else {
		if (s->sm_type < __arraycount(spdmem_basic_types))
			type = spdmem_basic_types[s->sm_type];
		else
			type = "unknown memory type";

		if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
		    s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
		    s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
			type =
			    spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
			type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
		}
		if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM &&
		    s->sm_ddr4.ddr4_mod_type <
		    __arraycount(spdmem_ddr4_module_types)) {
			type = spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type];
		}
	}

	strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);

	if (s->sm_type == SPDMEM_MEMTYPE_DDR4SDRAM) {
		/*
		 * The latest spec (DDR4 SPD Document Release 3) defines
		 * NVDIMM Hybrid only.
		 */
		if ((s->sm_ddr4.ddr4_hybrid)
		    && (s->sm_ddr4.ddr4_hybrid_media == 1))
			strlcat(sc->sc_type, " NVDIMM hybrid",
			    SPDMEM_TYPE_MAXLEN);
	}

	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    0,
		    CTLTYPE_STRING, "mem_type",
		    SYSCTL_DESCR("memory module type"), NULL,
		    0, sc->sc_type, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (IS_RAMBUS_TYPE) {
		aprint_naive("\n");
		aprint_normal("\n");
		aprint_normal_dev(self, "%s, SPD Revision %s", type, rambus_rev);
		dimm_size = 1 << (s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13);
		if (dimm_size >= 1024)
			aprint_normal(", %dGB\n", dimm_size / 1024);
		else
			aprint_normal(", %dMB\n", dimm_size);

		/* No further decode for RAMBUS memory */
		return;
	}
	switch (s->sm_type) {
	case SPDMEM_MEMTYPE_EDO:
	case SPDMEM_MEMTYPE_FPM:
		decode_edofpm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_ROM:
		decode_rom(node, self, s);
		break;
	case SPDMEM_MEMTYPE_SDRAM:
		decode_sdram(node, self, s, spd_len);
		break;
	case SPDMEM_MEMTYPE_DDRSDRAM:
		decode_ddr(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR2SDRAM:
		decode_ddr2(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR3SDRAM:
		decode_ddr3(node, self, s);
		break;
	case SPDMEM_MEMTYPE_FBDIMM:
	case SPDMEM_MEMTYPE_FBDIMM_PROBE:
		decode_fbdimm(node, self, s);
		break;
	case SPDMEM_MEMTYPE_DDR4SDRAM:
		decode_ddr4(node, self, s);
		break;
	}

	/* Dump SPD */
	for (i = 0; i < spd_len; i += 16) {
		unsigned int j, k;
		aprint_debug_dev(self, "0x%02x:", i);
		k = (spd_len > (i + 16)) ? i + 16 : spd_len;
		for (j = i; j < k; j++)
			aprint_debug(" %02x", ((uint8_t *)s)[j]);
		aprint_debug("\n");
	}
}

int
spdmem_common_detach(struct spdmem_softc *sc, device_t self)
{
	sysctl_teardown(&sc->sc_sysctl_log);

	return 0;
}

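/*
 * Print and export the module size and speed.  dimm_size is in MB,
 * cycle_time is in units of .001 ns, d_clk is the number of data
 * transfers per clock (1 for SDR, 2 for the DDR generations), bits is
 * the data-bus width, round selects rounding of the PCn-xxxx rating to
 * a multiple of 100, and a non-zero speed overrides the computed rating
 * (used for old SDRAM).
 */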
static void
decode_size_speed(device_t self, const struct sysctlnode *node,
	int dimm_size, int cycle_time, int d_clk, int bits,
	bool round, const char *ddr_type_string, int speed)
{
	int p_clk;
	struct spdmem_softc *sc = device_private(self);

	if (dimm_size < 1024)
		aprint_normal("%dMB", dimm_size);
	else
		aprint_normal("%dGB", dimm_size / 1024);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "size",
		    SYSCTL_DESCR("module size in MB"), NULL,
		    dimm_size, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	if (cycle_time == 0) {
		aprint_normal("\n");
		return;
	}

	/*
	 * Calculate p_clk first, since for DDR3 we need maximum significance.
	 * The DDR3 rating is not rounded to a multiple of 100, so a
	 * cycle_time of 1.5ns is displayed as PC3-10666.
	 *
	 * For SDRAM, the speed is provided by the caller so we use it.
	 */
	d_clk *= 1000 * 1000;
	if (speed)
		p_clk = speed;
	else
		p_clk = (d_clk * bits) / 8 / cycle_time;
	d_clk = (d_clk + cycle_time / 2) / cycle_time;
	if (round) {
		if ((p_clk % 100) >= 50)
			p_clk += 50;
		p_clk -= p_clk % 100;
	}
	aprint_normal(", %dMHz (%s-%d)\n",
	    d_clk, ddr_type_string, p_clk);
	if (node != NULL)
		sysctl_createv(&sc->sc_sysctl_log, 0, NULL, NULL,
		    CTLFLAG_IMMEDIATE,
		    CTLTYPE_INT, "speed",
		    SYSCTL_DESCR("memory speed in MHz"),
		    NULL, d_clk, NULL, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
}

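/*
 * Print the interface voltage and refresh interval encoded in the
 * shared sm_voltage and sm_refresh SPD fields.
 */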
static void
decode_voltage_refresh(device_t self, struct spdmem *s)
{
	const char *voltage, *refresh;

	if (s->sm_voltage < __arraycount(spdmem_voltage_types))
		voltage = spdmem_voltage_types[s->sm_voltage];
	else
		voltage = "unknown";

	if (s->sm_refresh < __arraycount(spdmem_refresh_types))
		refresh = spdmem_refresh_types[s->sm_refresh];
	else
		refresh = "unknown";

	aprint_verbose_dev(self, "voltage %s, refresh time %s%s\n",
	    voltage, refresh,
	    s->sm_selfrefresh?" (self-refreshing)":"");
}

static void
decode_edofpm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
	    s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols, s->sm_fpm.fpm_banks,
	    s->sm_fpm.fpm_tRAC, s->sm_fpm.fpm_tCAC);
}

static void
decode_rom(const struct sysctlnode *node, device_t self, struct spdmem *s)
{

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("\n");
	aprint_verbose_dev(self, "%d rows, %d cols, %d banks\n",
	    s->sm_rom.rom_rows, s->sm_rom.rom_cols, s->sm_rom.rom_banks);
}

static void
decode_sdram(const struct sysctlnode *node, device_t self, struct spdmem *s,
	int spd_len)
{
	int dimm_size, cycle_time, bits, tAA, i, speed, freq;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_sdr.sdr_mod_attrs & SPDMEM_SDR_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17);
	dimm_size *= s->sm_sdr.sdr_banks * s->sm_sdr.sdr_banks_per_chip;

	cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
	    s->sm_sdr.sdr_cycle_tenths * 100;
	bits = le16toh(s->sm_sdr.sdr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;

	/* Calculate speed here - from OpenBSD */
	if (spd_len >= 128)
		freq = ((uint8_t *)s)[126];
	else
		freq = 0;
	switch (freq) {
		/*
		 * Must check cycle time since some PC-133 DIMMs
		 * actually report PC-100
		 */
	case 100:
	case 133:
		if (cycle_time < 8000)
			speed = 133;
		else
			speed = 100;
		break;
	case 0x66:		/* Legacy DIMMs use _hex_ 66! */
	default:
		speed = 66;
	}
	decode_size_speed(self, node, dimm_size, cycle_time, 1, bits, FALSE,
	    "PC", speed);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols, s->sm_sdr.sdr_banks,
	    s->sm_sdr.sdr_banks_per_chip, cycle_time / 1000,
	    (cycle_time % 1000) / 100);

	tAA = 0;
	for (i = 0; i < 8; i++)
		if (s->sm_sdr.sdr_tCAS & (1 << i))
			tAA = i;
	tAA++;
	aprint_verbose_dev(self, LATENCY, tAA, s->sm_sdr.sdr_tRCD,
	    s->sm_sdr.sdr_tRP, s->sm_sdr.sdr_tRAS);

	decode_voltage_refresh(self, s);
}

static void
decode_ddr(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_ddr.ddr_mod_attrs & SPDMEM_DDR_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17);
	dimm_size *= s->sm_ddr.ddr_ranks * s->sm_ddr.ddr_banks_per_chip;

	cycle_time = s->sm_ddr.ddr_cycle_whole * 1000 +
	    spdmem_cycle_frac[s->sm_ddr.ddr_cycle_tenths];
	bits = le16toh(s->sm_ddr.ddr_datawidth);
	if (s->sm_config == 1 || s->sm_config == 2)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%dns cycle time\n",
	    s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols, s->sm_ddr.ddr_ranks,
	    s->sm_ddr.ddr_banks_per_chip, cycle_time / 1000,
	    (cycle_time % 1000 + 50) / 100);

	tAA = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr.ddr_tCAS & (1 << i))
			tAA = i;
	tAA /= 2;

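/*
 * tRCD and tRP are stored in units of 0.25 ns and tRAS in whole ns
 * (hence the 250 and 1000 scale factors below); convert them to clock
 * cycles, rounding up.
 */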
#define	__DDR_ROUND(scale, field)	\
	((scale * s->sm_ddr.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR_ROUND(250, ddr_tRCD),
	    __DDR_ROUND(250, ddr_tRP), __DDR_ROUND(1000, ddr_tRAS));

#undef __DDR_ROUND

	decode_voltage_refresh(self, s);
}

static void
decode_ddr2(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits, tAA, i;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	aprint_normal("%s, %s, ",
	    (s->sm_ddr2.ddr2_mod_attrs & SPDMEM_DDR2_MASK_REG)?
		" (registered)":"",
	    (s->sm_config < __arraycount(spdmem_parity_types))?
		spdmem_parity_types[s->sm_config]:"invalid parity");

	dimm_size = 1 << (s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17);
	dimm_size *= (s->sm_ddr2.ddr2_ranks + 1) *
	    s->sm_ddr2.ddr2_banks_per_chip;

	cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 +
	    spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac];
	bits = s->sm_ddr2.ddr2_datawidth;
	if ((s->sm_config & 0x03) != 0)
		bits -= 8;
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d ranks, %d banks/chip, %d.%02dns cycle time\n",
	    s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols,
	    s->sm_ddr2.ddr2_ranks + 1, s->sm_ddr2.ddr2_banks_per_chip,
	    cycle_time / 1000, (cycle_time % 1000 + 5) / 10);

	tAA = 0;
	for (i = 2; i < 8; i++)
		if (s->sm_ddr2.ddr2_tCAS & (1 << i))
			tAA = i;

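/*
 * As for DDR: scale tRCD/tRP (0.25 ns units) and tRAS (1 ns units) to
 * clock cycles, rounding up.
 */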
#define	__DDR2_ROUND(scale, field)	\
	((scale * s->sm_ddr2.field + cycle_time - 1) / cycle_time)

	aprint_verbose_dev(self, LATENCY, tAA, __DDR2_ROUND(250, ddr2_tRCD),
	    __DDR2_ROUND(250, ddr2_tRP), __DDR2_ROUND(1000, ddr2_tRAS));

#undef __DDR2_ROUND

	decode_voltage_refresh(self, s);
}

static void
decode_ddr3(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal(": %18s\n", s->sm_ddr3.ddr3_part);
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	if (s->sm_ddr3.ddr3_mod_type ==
	    SPDMEM_DDR3_TYPE_MINI_RDIMM ||
	    s->sm_ddr3.ddr3_mod_type == SPDMEM_DDR3_TYPE_RDIMM)
		aprint_normal(" (registered)");
	aprint_normal(", %sECC, %stemp-sensor, ",
	    (s->sm_ddr3.ddr3_hasECC)?"":"no ",
	    (s->sm_ddr3.ddr3_has_therm_sensor)?"":"no ");

	/*
	 * DDR3 size specification is quite different from others
	 *
	 * Module capacity is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	external_bus_width / internal_bus_width
	 * We further divide by 2**20 to get our answer in MB
	 */
	dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
	    (s->sm_ddr3.ddr3_datawidth + 3) -
	    (s->sm_ddr3.ddr3_chipwidth + 2);
	dimm_size = (1 << dimm_size) * (s->sm_ddr3.ddr3_physbanks + 1);

	cycle_time = (1000 * s->sm_ddr3.ddr3_mtb_dividend +
	    (s->sm_ddr3.ddr3_mtb_divisor / 2)) /
	    s->sm_ddr3.ddr3_mtb_divisor;
	cycle_time *= s->sm_ddr3.ddr3_tCKmin;
	bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, FALSE,
	    "PC3", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d log. banks, %d phys. banks, "
	    "%d.%03dns cycle time\n",
	    s->sm_ddr3.ddr3_rows + 9, s->sm_ddr3.ddr3_cols + 12,
	    1 << (s->sm_ddr3.ddr3_logbanks + 3),
	    s->sm_ddr3.ddr3_physbanks + 1,
	    cycle_time / 1000, cycle_time % 1000);

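/*
 * The DDR3 timing values are stored in medium-timebase (MTB) units,
 * the same units as tCKmin, so the latency in clocks is a simple ratio.
 */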
#define	__DDR3_CYCLES(field) (s->sm_ddr3.field / s->sm_ddr3.ddr3_tCKmin)

	aprint_verbose_dev(self, LATENCY, __DDR3_CYCLES(ddr3_tAAmin),
	    __DDR3_CYCLES(ddr3_tRCDmin), __DDR3_CYCLES(ddr3_tRPmin),
	    (s->sm_ddr3.ddr3_tRAS_msb * 256 + s->sm_ddr3.ddr3_tRAS_lsb) /
	    s->sm_ddr3.ddr3_tCKmin);

#undef __DDR3_CYCLES

	/* For DDR3, Voltage is written in another area */
	if (!s->sm_ddr3.ddr3_NOT15V || s->sm_ddr3.ddr3_135V
	    || s->sm_ddr3.ddr3_125V) {
		aprint_verbose("%s:", device_xname(self));
		if (!s->sm_ddr3.ddr3_NOT15V)
			aprint_verbose(" 1.5V");
		if (s->sm_ddr3.ddr3_135V)
			aprint_verbose(" 1.35V");
		if (s->sm_ddr3.ddr3_125V)
			aprint_verbose(" 1.25V");
		aprint_verbose(" operable\n");
	}
}

static void
decode_fbdimm(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time, bits;

	aprint_naive("\n");
	aprint_normal("\n");
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);

	/*
	 * FB-DIMM module size calculation is very much like DDR3
	 */
	dimm_size = s->sm_fbd.fbdimm_rows + 12 +
	    s->sm_fbd.fbdimm_cols + 9 - 20 - 3;
	dimm_size = (1 << dimm_size) * (1 << (s->sm_fbd.fbdimm_banks + 2));

	cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
	    (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
	    s->sm_fbd.fbdimm_mtb_divisor;
	bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
	decode_size_speed(self, node, dimm_size, cycle_time, 2, bits, TRUE,
	    "PC2", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d.%02dns cycle time\n",
	    s->sm_fbd.fbdimm_rows, s->sm_fbd.fbdimm_cols,
	    1 << (s->sm_fbd.fbdimm_banks + 2),
	    cycle_time / 1000, (cycle_time % 1000 + 5) / 10);

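/*
 * Like DDR3, the FB-DIMM timing fields are stored in the same units as
 * tCKmin, so the latency in clocks is a simple ratio.
 */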
#define	__FBDIMM_CYCLES(field) (s->sm_fbd.field / s->sm_fbd.fbdimm_tCKmin)

	aprint_verbose_dev(self, LATENCY, __FBDIMM_CYCLES(fbdimm_tAAmin),
	    __FBDIMM_CYCLES(fbdimm_tRCDmin), __FBDIMM_CYCLES(fbdimm_tRPmin),
	    (s->sm_fbd.fbdimm_tRAS_msb * 256 + s->sm_fbd.fbdimm_tRAS_lsb) /
	    s->sm_fbd.fbdimm_tCKmin);

#undef __FBDIMM_CYCLES

	decode_voltage_refresh(self, s);
}

static void
decode_ddr4(const struct sysctlnode *node, device_t self, struct spdmem *s)
{
	int dimm_size, cycle_time;
	int tAA_clocks, tRCD_clocks, tRP_clocks, tRAS_clocks;

	aprint_naive("\n");
	aprint_normal(": %20s\n", s->sm_ddr4.ddr4_part_number);
	aprint_normal_dev(self, "%s", spdmem_basic_types[s->sm_type]);
	if (s->sm_ddr4.ddr4_mod_type < __arraycount(spdmem_ddr4_module_types))
		aprint_normal(" (%s)",
		    spdmem_ddr4_module_types[s->sm_ddr4.ddr4_mod_type]);
	aprint_normal(", %stemp-sensor, ",
	    (s->sm_ddr4.ddr4_has_therm_sensor)?"":"no ");

	/*
	 * DDR4 size calculation from JEDEC spec
	 *
	 * Module capacity in bytes is defined as
	 *	Chip_Capacity_in_bits / 8bits-per-byte *
	 *	primary_bus_width / DRAM_width *
	 *	logical_ranks_per_DIMM
	 *
	 * logical_ranks_per_DIMM equals package_ranks, but multiply
	 * by diecount for 3DS packages
	 *
	 * We further divide by 2**20 to get our answer in MB
	 */
	dimm_size = (s->sm_ddr4.ddr4_capacity + 28)	/* chip_capacity */
	    - 20	/* convert to MB */
	    - 3		/* bits --> bytes */
	    + (s->sm_ddr4.ddr4_primary_bus_width + 3);	/* bus width */
	switch (s->sm_ddr4.ddr4_device_width) {		/* DRAM width */
	case 0:	dimm_size -= 2;
		break;
	case 1:	dimm_size -= 3;
		break;
	case 2:	dimm_size -= 4;
		break;
	case 4:	dimm_size -= 5;
		break;
	default:
		dimm_size = -1;		/* flag invalid value */
	}
	if (dimm_size >= 0) {
		dimm_size = (1 << dimm_size) *
		    (s->sm_ddr4.ddr4_package_ranks + 1); /* log.ranks/DIMM */
		if (s->sm_ddr4.ddr4_signal_loading == 2) {
			dimm_size *= (s->sm_ddr4.ddr4_diecount + 1);
		}
	}

#define	__DDR4_VALUE(field) ((s->sm_ddr4.ddr4_##field##_mtb * 125 +	\
			      s->sm_ddr4.ddr4_##field##_ftb) -		\
			     ((s->sm_ddr4.ddr4_##field##_ftb > 127)?256:0))
	/*
	 * For now, the only value for mtb is 1 = 125ps, and ftb = 1ps,
	 * so we don't need to figure out the time-base units - just
	 * hard-code them for now.
	 */
	cycle_time = __DDR4_VALUE(tCKAVGmin);
	decode_size_speed(self, node, dimm_size, cycle_time, 2,
	    1 << (s->sm_ddr4.ddr4_primary_bus_width + 3),
	    TRUE, "PC4", 0);

	aprint_verbose_dev(self,
	    "%d rows, %d cols, %d banks, %d bank groups, "
	    "%d.%03dns cycle time\n",
	    s->sm_ddr4.ddr4_rows + 9, s->sm_ddr4.ddr4_cols + 12,
	    1 << (2 + s->sm_ddr4.ddr4_logbanks),
	    1 << s->sm_ddr4.ddr4_bankgroups,
	    cycle_time / 1000, cycle_time % 1000);

	/*
	 * Note that the ddr4_xxx_ftb fields are actually signed offsets from
	 * the corresponding mtb value, so we might have to subtract 256!
	 */

	tAA_clocks = __DDR4_VALUE(tAAmin) * 1000 / cycle_time;
	tRCD_clocks = __DDR4_VALUE(tRCDmin) * 1000 / cycle_time;
	tRP_clocks = __DDR4_VALUE(tRPmin) * 1000 / cycle_time;
	tRAS_clocks = (s->sm_ddr4.ddr4_tRASmin_msb * 256 +
	    s->sm_ddr4.ddr4_tRASmin_lsb) * 125 * 1000 / cycle_time;

	/*
	 * Per JEDEC spec, rounding is done by taking the time value,
	 * dividing by the cycle time, subtracting .010 from the result,
	 * and then rounding up to the nearest integer.  Unfortunately,
	 * none of their examples say what to do when the result of the
	 * subtraction is already an integer.  For now, assume that we
	 * still round up (so an interval of exactly 12.010 clock cycles
	 * will be printed as 13).
	 */
#define	__DDR4_ROUND(value) ((value - 10) / 1000 + 1)

	aprint_verbose_dev(self, LATENCY, __DDR4_ROUND(tAA_clocks),
	    __DDR4_ROUND(tRCD_clocks),
	    __DDR4_ROUND(tRP_clocks),
	    __DDR4_ROUND(tRAS_clocks));

#undef __DDR4_VALUE
#undef __DDR4_ROUND
}