/* $NetBSD: qcompep.c,v 1.2 2025/01/08 22:58:05 jmcneill Exp $ */
/* $OpenBSD: qcaoss.c,v 1.1 2023/05/23 14:10:27 patrick Exp $ */
/* $OpenBSD: qccpucp.c,v 1.1 2024/11/16 21:17:54 tobhe Exp $ */
/*
 * Copyright (c) 2023 Patrick Wildt <patrick@blueri.se>
 * Copyright (c) 2024 Tobias Heider <tobhe@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>

#include <dev/acpi/acpivar.h>
#include <dev/acpi/qcompep.h>
#include <dev/acpi/qcomipcc.h>

#include <dev/ic/scmi.h>

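/*
 * Byte offsets into the AOSS QMP descriptor exported by the always-on
 * subsystem in the mapped AOSS region.  The "ucore" fields describe the
 * remote (AOP firmware) side of the link and the "mcore" fields the local
 * (application processor) side; the attach handshake below mirrors the
 * remote state into the corresponding *_ACK fields.
 */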
#define AOSS_DESC_MAGIC			0x0
#define AOSS_DESC_VERSION		0x4
#define AOSS_DESC_FEATURES		0x8
#define AOSS_DESC_UCORE_LINK_STATE	0xc
#define AOSS_DESC_UCORE_LINK_STATE_ACK	0x10
#define AOSS_DESC_UCORE_CH_STATE	0x14
#define AOSS_DESC_UCORE_CH_STATE_ACK	0x18
#define AOSS_DESC_UCORE_MBOX_SIZE	0x1c
#define AOSS_DESC_UCORE_MBOX_OFFSET	0x20
#define AOSS_DESC_MCORE_LINK_STATE	0x24
#define AOSS_DESC_MCORE_LINK_STATE_ACK	0x28
#define AOSS_DESC_MCORE_CH_STATE	0x2c
#define AOSS_DESC_MCORE_CH_STATE_ACK	0x30
#define AOSS_DESC_MCORE_MBOX_SIZE	0x34
#define AOSS_DESC_MCORE_MBOX_OFFSET	0x38

#define AOSS_MAGIC			0x4d41494c
#define AOSS_VERSION			1

#define AOSS_STATE_UP			(0xffffU << 0)
#define AOSS_STATE_DOWN			(0xffffU << 16)

#define AOSSREAD4(sc, reg)						\
	bus_space_read_4((sc)->sc_iot, (sc)->sc_aoss_ioh, (reg))
#define AOSSWRITE4(sc, reg, val)					\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_aoss_ioh, (reg), (val))

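/*
 * CPUCP mailbox registers.  CPUCP_REG_CMD(i) is the per-channel command
 * (doorbell) register written on the TX side; the RX_* registers control
 * mapping, status, clearing and enabling of incoming doorbells.
 */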
#define CPUCP_REG_CMD(i)		(0x104 + ((i) * 8))
#define CPUCP_MASK_CMD			0xffffffffffffffffULL
#define CPUCP_REG_RX_MAP		0x4000
#define CPUCP_REG_RX_STAT		0x4400
#define CPUCP_REG_RX_CLEAR		0x4800
#define CPUCP_REG_RX_EN			0x4C00

#define RXREAD8(sc, reg)						\
	(bus_space_read_8((sc)->sc_iot, (sc)->sc_cpucp_rx_ioh, (reg)))
#define RXWRITE8(sc, reg, val)						\
	bus_space_write_8((sc)->sc_iot, (sc)->sc_cpucp_rx_ioh, (reg), (val))

#define TXWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_cpucp_tx_ioh, (reg), (val))

struct qcpep_data {
	bus_addr_t	aoss_base;
	bus_size_t	aoss_size;
	uint32_t	aoss_client_id;
	uint32_t	aoss_signal_id;
	bus_addr_t	cpucp_rx_base;
	bus_size_t	cpucp_rx_size;
	bus_addr_t	cpucp_tx_base;
	bus_size_t	cpucp_tx_size;
	bus_addr_t	cpucp_shmem_base;
	bus_size_t	cpucp_shmem_size;
};

struct qcpep_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_iot;

	const struct qcpep_data	*sc_data;

	bus_space_handle_t	sc_aoss_ioh;
	size_t			sc_aoss_offset;
	size_t			sc_aoss_size;
	void			*sc_aoss_ipcc;

	bus_space_handle_t	sc_cpucp_rx_ioh;
	bus_space_handle_t	sc_cpucp_tx_ioh;

	struct scmi_softc	sc_scmi;
};

struct qcpep_softc *qcpep_sc;

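/*
 * Per-SoC configuration for the Snapdragon X Elite: AOSS and CPUCP mailbox
 * windows plus the SCMI shared-memory area.  The addresses used by the
 * driver come from this table rather than from the ACPI _CRS resources.
 */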
static const struct qcpep_data qcpep_x1e_data = {
	.aoss_base = 0x0c300000,
	.aoss_size = 0x400,
	.aoss_client_id = 0,	/* IPCC_CLIENT_AOP */
	.aoss_signal_id = 0,	/* IPCC_MPROC_SIGNAL_GLINK_QMP */
	.cpucp_rx_base = 0x17430000,
	.cpucp_rx_size = 0x10000,
	.cpucp_tx_base = 0x18830000,
	.cpucp_tx_size = 0x10000,
	.cpucp_shmem_base = 0x18b4e000,
	.cpucp_shmem_size = 0x400,
};

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "QCOM0C17", .data = &qcpep_x1e_data },
	DEVICE_COMPAT_EOL
};

static int	qcpep_match(device_t, cfdata_t, void *);
static void	qcpep_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(qcompep, sizeof(struct qcpep_softc),
    qcpep_match, qcpep_attach, NULL, NULL);

static int
qcpep_match(device_t parent, cfdata_t match, void *aux)
{
	struct acpi_attach_args *aa = aux;

	return acpi_compatible_match(aa, compat_data);
}

static void
qcpep_attach(device_t parent, device_t self, void *aux)
{
	struct qcpep_softc *sc = device_private(self);
	struct acpi_attach_args *aa = aux;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct acpi_resources res;
	uint8_t *scmi_shmem;
	ACPI_STATUS rv;
	int i, last_pkg;

	rv = acpi_resource_parse(self, aa->aa_node->ad_handle,
	    "_CRS", &res, &acpi_resource_parse_ops_default);
	if (ACPI_FAILURE(rv)) {
		return;
	}
	acpi_resource_cleanup(&res);

	sc->sc_dev = self;
	sc->sc_iot = aa->aa_memt;
	sc->sc_data = acpi_compatible_lookup(aa, compat_data)->data;

	if (bus_space_map(sc->sc_iot, sc->sc_data->aoss_base,
	    sc->sc_data->aoss_size, BUS_SPACE_MAP_NONPOSTED, &sc->sc_aoss_ioh)) {
		aprint_error_dev(self, "couldn't map aoss registers\n");
		return;
	}
	if (bus_space_map(sc->sc_iot, sc->sc_data->cpucp_rx_base,
	    sc->sc_data->cpucp_rx_size, BUS_SPACE_MAP_NONPOSTED,
	    &sc->sc_cpucp_rx_ioh)) {
		aprint_error_dev(self, "couldn't map cpucp rx registers\n");
		return;
	}
	if (bus_space_map(sc->sc_iot, sc->sc_data->cpucp_tx_base,
	    sc->sc_data->cpucp_tx_size, BUS_SPACE_MAP_NONPOSTED,
	    &sc->sc_cpucp_tx_ioh)) {
		aprint_error_dev(self, "couldn't map cpucp tx registers\n");
		return;
	}

	sc->sc_aoss_ipcc = qcipcc_channel(sc->sc_data->aoss_client_id,
	    sc->sc_data->aoss_signal_id);
	if (sc->sc_aoss_ipcc == NULL) {
		aprint_error_dev(self, "couldn't find ipcc mailbox\n");
		return;
	}

	if (AOSSREAD4(sc, AOSS_DESC_MAGIC) != AOSS_MAGIC ||
	    AOSSREAD4(sc, AOSS_DESC_VERSION) != AOSS_VERSION) {
		aprint_error_dev(self, "invalid QMP info\n");
		return;
	}

	sc->sc_aoss_offset = AOSSREAD4(sc, AOSS_DESC_MCORE_MBOX_OFFSET);
	sc->sc_aoss_size = AOSSREAD4(sc, AOSS_DESC_MCORE_MBOX_SIZE);
	if (sc->sc_aoss_size == 0) {
		aprint_error_dev(self, "invalid AOSS mailbox size\n");
		return;
	}

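	/*
	 * QMP link and channel handshake: acknowledge the remote (ucore)
	 * link state, raise our own (mcore) link and then channel state,
	 * ringing the IPCC doorbell after each step and polling for up to
	 * a second for the matching acknowledgement from the AOSS.
	 */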
	AOSSWRITE4(sc, AOSS_DESC_UCORE_LINK_STATE_ACK,
	    AOSSREAD4(sc, AOSS_DESC_UCORE_LINK_STATE));

	AOSSWRITE4(sc, AOSS_DESC_MCORE_LINK_STATE, AOSS_STATE_UP);
	qcipcc_send(sc->sc_aoss_ipcc);

	for (i = 1000; i > 0; i--) {
		if (AOSSREAD4(sc, AOSS_DESC_MCORE_LINK_STATE_ACK) == AOSS_STATE_UP)
			break;
		delay(1000);
	}
	if (i == 0) {
		aprint_error_dev(self, "didn't get link state ack\n");
		return;
	}

	AOSSWRITE4(sc, AOSS_DESC_MCORE_CH_STATE, AOSS_STATE_UP);
	qcipcc_send(sc->sc_aoss_ipcc);

	for (i = 1000; i > 0; i--) {
		if (AOSSREAD4(sc, AOSS_DESC_UCORE_CH_STATE) == AOSS_STATE_UP)
			break;
		delay(1000);
	}
	if (i == 0) {
		aprint_error_dev(self, "didn't get open channel\n");
		return;
	}

	AOSSWRITE4(sc, AOSS_DESC_UCORE_CH_STATE_ACK, AOSS_STATE_UP);
	qcipcc_send(sc->sc_aoss_ipcc);

	for (i = 1000; i > 0; i--) {
		if (AOSSREAD4(sc, AOSS_DESC_MCORE_CH_STATE_ACK) == AOSS_STATE_UP)
			break;
		delay(1000);
	}
	if (i == 0) {
		aprint_error_dev(self, "didn't get channel ack\n");
		return;
	}

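	/*
	 * Reset the CPUCP RX block: disable incoming doorbells, reset the
	 * clear and map registers, then map all command bits.  Individual
	 * channels are enabled later in qccpucp_channel().
	 */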
	RXWRITE8(sc, CPUCP_REG_RX_EN, 0);
	RXWRITE8(sc, CPUCP_REG_RX_CLEAR, 0);
	RXWRITE8(sc, CPUCP_REG_RX_MAP, 0);
	RXWRITE8(sc, CPUCP_REG_RX_MAP, CPUCP_MASK_CMD);

	qcpep_sc = sc;

	/* SCMI setup */
	scmi_shmem = AcpiOsMapMemory(sc->sc_data->cpucp_shmem_base,
	    sc->sc_data->cpucp_shmem_size);
	if (scmi_shmem == NULL) {
		aprint_error_dev(self, "couldn't map SCMI shared memory\n");
		return;
	}

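	/*
	 * The SCMI transport lives in the CPUCP shared-memory window: the
	 * TX channel at offset 0x000 and the RX channel at offset 0x200,
	 * with CPUCP mailbox channels 0 and 2 as their doorbells.
	 */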
	sc->sc_scmi.sc_dev = self;
	sc->sc_scmi.sc_iot = sc->sc_iot;
	sc->sc_scmi.sc_shmem_tx = (struct scmi_shmem *)(scmi_shmem + 0x000);
	sc->sc_scmi.sc_shmem_rx = (struct scmi_shmem *)(scmi_shmem + 0x200);
	sc->sc_scmi.sc_mbox_tx = qccpucp_channel(0);
	sc->sc_scmi.sc_mbox_tx_send = qccpucp_send;
	sc->sc_scmi.sc_mbox_rx = qccpucp_channel(2);
	sc->sc_scmi.sc_mbox_rx_send = qccpucp_send;
	/* Build performance domain to CPU map. */
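	/* One domain per CPU package; its first CPU stands in for the domain. */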
	sc->sc_scmi.sc_perf_ndmap = 0;
	last_pkg = -1;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_package_id != last_pkg) {
			sc->sc_scmi.sc_perf_ndmap++;
			last_pkg = ci->ci_package_id;
		}
	}
	sc->sc_scmi.sc_perf_dmap = kmem_zalloc(
	    sizeof(*sc->sc_scmi.sc_perf_dmap) * sc->sc_scmi.sc_perf_ndmap,
	    KM_SLEEP);
	last_pkg = -1;
	i = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_package_id != last_pkg) {
			sc->sc_scmi.sc_perf_dmap[i].pm_domain = i;
			sc->sc_scmi.sc_perf_dmap[i].pm_ci = ci;
			last_pkg = ci->ci_package_id;
			i++;
		}
	}
	if (scmi_init_mbox(&sc->sc_scmi) != 0) {
		aprint_error_dev(self, "couldn't setup SCMI\n");
		return;
	}
	scmi_attach_perf(&sc->sc_scmi);
}

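/*
 * Send a message to the AOSS over the QMP mailbox.  The payload must be a
 * multiple of 4 bytes and fit in the mcore mailbox behind its 4-byte length
 * word.  Data is copied with 32-bit writes, the length is written last to
 * commit the message, the IPCC doorbell is rung, and the peer acknowledges
 * consumption by clearing the length back to zero, which is polled for up
 * to a second.
 */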
int
qcaoss_send(char *data, size_t len)
{
	struct qcpep_softc *sc = qcpep_sc;
	uint32_t reg;
	int i;

	if (sc == NULL)
		return ENXIO;

	if (data == NULL || sizeof(uint32_t) + len > sc->sc_aoss_size ||
	    (len % sizeof(uint32_t)) != 0)
		return EINVAL;

	/* Write data first, needs to be 32-bit access. */
	for (i = 0; i < len; i += 4) {
		memcpy(&reg, data + i, sizeof(reg));
		AOSSWRITE4(sc, sc->sc_aoss_offset + sizeof(uint32_t) + i, reg);
	}

	/* Commit transaction by writing length. */
	AOSSWRITE4(sc, sc->sc_aoss_offset, len);

	/* Assert it's stored and inform peer. */
	if (AOSSREAD4(sc, sc->sc_aoss_offset) != len) {
		device_printf(sc->sc_dev,
		    "aoss message readback failed\n");
	}
	qcipcc_send(sc->sc_aoss_ipcc);

	for (i = 1000; i > 0; i--) {
		if (AOSSREAD4(sc, sc->sc_aoss_offset) == 0)
			break;
		delay(1000);
	}
	if (i == 0) {
		device_printf(sc->sc_dev, "timeout sending message\n");
		AOSSWRITE4(sc, sc->sc_aoss_offset, 0);
		return ETIMEDOUT;
	}

	return 0;
}

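/*
 * Hand out a cookie for CPUCP channel "id" (0-2) and enable its RX doorbell
 * bit.  The cookie is id + 1 so that NULL still indicates failure.
 */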
void *
qccpucp_channel(u_int id)
{
	struct qcpep_softc *sc = qcpep_sc;
	uint64_t val;

	if (sc == NULL || id > 2) {
		return NULL;
	}

	val = RXREAD8(sc, CPUCP_REG_RX_EN);
	val |= (1 << id);
	RXWRITE8(sc, CPUCP_REG_RX_EN, val);

	return (void *)(uintptr_t)(id + 1);
}

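/*
 * Ring the CPUCP doorbell for the channel behind "cookie" by writing its
 * command register.
 */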
int
qccpucp_send(void *cookie)
{
	struct qcpep_softc *sc = qcpep_sc;
	uintptr_t id = (uintptr_t)cookie - 1;

	TXWRITE4(sc, CPUCP_REG_CMD(id), 0);

	return 0;
}