/* $NetBSD: amdpm.c,v 1.35.6.2 2013/06/23 06:20:18 tls Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Enami Tsugutomo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdpm.c,v 1.35.6.2 2013/06/23 06:20:18 tls Exp $");

#include "opt_amdpm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/rnd.h>
#include <sys/mutex.h>

#include <sys/bus.h>
#include <dev/ic/acpipmtimer.h>

#include <dev/i2c/i2cvar.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/amdpmreg.h>
#include <dev/pci/amdpmvar.h>
#include <dev/pci/amdpm_smbusreg.h>

static void amdpm_rnd_callout(void *);
static void amdpm_rnd_callout_locked(void *);

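/*
 * With AMDPM_RND_COUNTERS compiled in, the driver keeps event counters
 * for RNG reads that produced data ("rnd hits"), reads that found no
 * data ready ("rnd miss"), and the distribution of harvested bytes;
 * this macro bumps one of those counters.
 */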
#ifdef AMDPM_RND_COUNTERS
#define AMDPM_RNDCNT_INCR(ev)   (ev)->ev_count++
#endif

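/*
 * Match the AMD-768 power management controller, the AMD-8111 ACPI/PM
 * function, or the nVidia nForce (Xbox) SMBus controller.
 */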
static int
amdpm_match(device_t parent, cfdata_t match, void *aux)
{
        struct pci_attach_args *pa = aux;

        if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_AMD) {
                switch (PCI_PRODUCT(pa->pa_id)) {
                case PCI_PRODUCT_AMD_PBC768_PMC:
                case PCI_PRODUCT_AMD_PBC8111_ACPI:
                        return (1);
                }
        }
        if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NVIDIA) {
                switch (PCI_PRODUCT(pa->pa_id)) {
                case PCI_PRODUCT_NVIDIA_XBOX_SMBUS:
                        return (1);
                }
        }

        return (0);
}

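/*
 * Entropy-request callback registered via rndsource_setcb(): record how
 * many bytes the entropy pool wants and start harvesting immediately.
 */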
static void
amdpm_rnd_get(size_t bytes, void *priv)
{
        struct amdpm_softc *sc = priv;

        mutex_enter(&sc->sc_mutex);
        sc->sc_rnd_need = bytes;
        amdpm_rnd_callout_locked(sc);
        mutex_exit(&sc->sc_mutex);
}

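/*
 * Attach: enable and map the PMxx I/O space, hook up the ACPI power
 * management timer (except on nForce), attach the SMBus controller,
 * and register the hardware RNG as an entropy source if it works.
 */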
static void
amdpm_attach(device_t parent, device_t self, void *aux)
{
        struct amdpm_softc *sc = device_private(self);
        struct pci_attach_args *pa = aux;
        pcireg_t confreg, pmptrreg;
        u_int32_t pmreg;
        int i;

        pci_aprint_devinfo(pa, NULL);

        sc->sc_dev = self;

        if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NVIDIA_XBOX_SMBUS)
                sc->sc_nforce = 1;
        else
                sc->sc_nforce = 0;

        sc->sc_pc = pa->pa_pc;
        sc->sc_tag = pa->pa_tag;
        sc->sc_iot = pa->pa_iot;
        sc->sc_pa = pa;

#if 0
        aprint_normal_dev(self, "");
        pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
#endif

        confreg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMDPM_CONFREG);
        /* enable pm i/o space for AMD-8111 and nForce */
        if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_AMD_PBC8111_ACPI ||
            sc->sc_nforce)
                confreg |= AMDPM_PMIOEN;

        /* Enable random number generation for everyone */
        pci_conf_write(pa->pa_pc, pa->pa_tag, AMDPM_CONFREG,
            confreg | AMDPM_RNGEN);
        confreg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMDPM_CONFREG);

        if ((confreg & AMDPM_PMIOEN) == 0) {
                aprint_error_dev(self, "PMxx space isn't enabled\n");
                return;
        }

        if (sc->sc_nforce) {
                pmptrreg = pci_conf_read(pa->pa_pc, pa->pa_tag, NFORCE_PMPTR);
                aprint_normal_dev(self, "power management at 0x%04x\n",
                    NFORCE_PMBASE(pmptrreg));
                if (bus_space_map(sc->sc_iot, NFORCE_PMBASE(pmptrreg),
                    AMDPM_PMSIZE, 0, &sc->sc_ioh)) {
                        aprint_error_dev(self, "failed to map PMxx space\n");
                        return;
                }
        } else {
                pmptrreg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMDPM_PMPTR);
                if (bus_space_map(sc->sc_iot, AMDPM_PMBASE(pmptrreg),
                    AMDPM_PMSIZE, 0, &sc->sc_ioh)) {
                        aprint_error_dev(self, "failed to map PMxx space\n");
                        return;
                }
        }

        /* don't attach a timecounter on nforce boards */
        if ((confreg & AMDPM_TMRRST) == 0 && (confreg & AMDPM_STOPTMR) == 0 &&
            !sc->sc_nforce) {
                acpipmtimer_attach(self, sc->sc_iot, sc->sc_ioh,
                    AMDPM_TMR, ((confreg & AMDPM_TMR32) ? ACPIPMT_32BIT : 0));
        }

        /* XXX this mutex is IPL_VM because it can be taken by rnd_getmore() */
        mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);

        /* try to attach devices on the smbus */
        if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_AMD_PBC8111_ACPI ||
            PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_AMD_PBC768_PMC ||
            sc->sc_nforce) {
                amdpm_smbus_attach(sc);
        }

        if (confreg & AMDPM_RNGEN) {
                /* Check to see if we can read data from the RNG. */
                (void) bus_space_read_4(sc->sc_iot, sc->sc_ioh,
                    AMDPM_RNGDATA);
                for (i = 0; i < 1000; i++) {
                        pmreg = bus_space_read_4(sc->sc_iot,
                            sc->sc_ioh, AMDPM_RNGSTAT);
                        if (pmreg & AMDPM_RNGDONE)
                                break;
                        delay(1);
                }
                if ((pmreg & AMDPM_RNGDONE) != 0) {
                        aprint_normal_dev(self,
                            "random number generator enabled (apprx. %dus)\n",
                            i);
                        callout_init(&sc->sc_rnd_ch, CALLOUT_MPSAFE);
                        rndsource_setcb(&sc->sc_rnd_source,
                            amdpm_rnd_get, sc);
                        rnd_attach_source(&sc->sc_rnd_source,
                            device_xname(self), RND_TYPE_RNG,
                            /*
                             * XXX Careful!  The use of RND_FLAG_NO_ESTIMATE
                             * XXX here is unobvious: we later feed raw bits
                             * XXX into the "entropy pool" with rnd_add_data,
                             * XXX explicitly supplying an entropy estimate.
                             * XXX In this context, NO_ESTIMATE serves only
                             * XXX to prevent rnd_add_data from trying to
                             * XXX use the *time at which we added the data*
                             * XXX as entropy, which is not a good idea since
                             * XXX we add data periodically from a callout.
                             */
                            RND_FLAG_NO_ESTIMATE|RND_FLAG_HASCB);
#ifdef AMDPM_RND_COUNTERS
                        evcnt_attach_dynamic(&sc->sc_rnd_hits, EVCNT_TYPE_MISC,
                            NULL, device_xname(self), "rnd hits");
                        evcnt_attach_dynamic(&sc->sc_rnd_miss, EVCNT_TYPE_MISC,
                            NULL, device_xname(self), "rnd miss");
                        for (i = 0; i < 256; i++) {
                                evcnt_attach_dynamic(&sc->sc_rnd_data[i],
                                    EVCNT_TYPE_MISC, NULL, device_xname(self),
                                    "rnd data");
                        }
#endif
                        sc->sc_rnd_need = RND_POOLBITS / NBBY;
                        amdpm_rnd_callout(sc);
                }
        }
}

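/*
 * Autoconfiguration glue; the driver provides no detach or activate hooks.
 */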
CFATTACH_DECL_NEW(amdpm, sizeof(struct amdpm_softc),
    amdpm_match, amdpm_attach, NULL, NULL);

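/*
 * Harvest entropy with sc_mutex held: if the RNG has a 32-bit word
 * ready, feed it to the entropy pool and, while more data is still
 * wanted, reschedule ourselves for the next tick.
 */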
static void
amdpm_rnd_callout_locked(void *v)
{
        struct amdpm_softc *sc = v;
        u_int32_t rngreg;
#ifdef AMDPM_RND_COUNTERS
        int i;
#endif

        if (sc->sc_rnd_need < 1) {
                callout_stop(&sc->sc_rnd_ch);
                return;
        }

        if ((bus_space_read_4(sc->sc_iot, sc->sc_ioh, AMDPM_RNGSTAT) &
            AMDPM_RNGDONE) != 0) {
                rngreg = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
                    AMDPM_RNGDATA);
                rnd_add_data(&sc->sc_rnd_source, &rngreg,
                    sizeof(rngreg), sizeof(rngreg) * NBBY);
                sc->sc_rnd_need -= sizeof(rngreg);
#ifdef AMDPM_RND_COUNTERS
                AMDPM_RNDCNT_INCR(&sc->sc_rnd_hits);
                for (i = 0; i < sizeof(rngreg); i++, rngreg >>= NBBY)
                        AMDPM_RNDCNT_INCR(&sc->sc_rnd_data[rngreg & 0xff]);
#endif
        }
#ifdef AMDPM_RND_COUNTERS
        else
                AMDPM_RNDCNT_INCR(&sc->sc_rnd_miss);
#endif
        if (sc->sc_rnd_need > 0) {
                callout_reset(&sc->sc_rnd_ch, 1, amdpm_rnd_callout, sc);
        }
}

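/*
 * Callout wrapper: take the softc mutex and run the actual harvester.
 */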
static void
amdpm_rnd_callout(void *v)
{
        struct amdpm_softc *sc = v;

        mutex_enter(&sc->sc_mutex);
        amdpm_rnd_callout_locked(v);
        mutex_exit(&sc->sc_mutex);
}