/*	$NetBSD: tpm.c,v 1.19.6.1 2021/05/31 22:15:18 cjep Exp $	*/

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008, 2009 Michael Shalayeff
 * Copyright (c) 2009, 2010 Hans-Joerg Hoexer
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tpm.c,v 1.19.6.1 2021/05/31 22:15:18 cjep Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pmf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <dev/ic/tpmreg.h>
#include <dev/ic/tpmvar.h>

#include "ioconf.h"

CTASSERT(sizeof(struct tpm_header) == 10);

#define TPM_BUFSIZ	1024

#define TPM_PARAM_SIZE	0x0001	/* that's a flag */

/* Timeouts. */
#define TPM_ACCESS_TMO	2000	/* 2sec */
#define TPM_READY_TMO	2000	/* 2sec */
#define TPM_READ_TMO	2000	/* 2sec */
#define TPM_BURST_TMO	2000	/* 2sec */

#define TPM_CAPS_REQUIRED \
	(TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT| \
	 TPM_INTF_INT_LEVEL_LOW)

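/*
 * Convert a timeout in milliseconds into a number of clock ticks.
 */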
static inline int
tpm_tmotohz(int tmo)
{
	struct timeval tv;

	tv.tv_sec = tmo / 1000;
	tv.tv_usec = 1000 * (tmo % 1000);

	return tvtohz(&tv);
}

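/*
 * Read the burst count, i.e. how many bytes the TPM can accept or return
 * without further handshaking. Poll for up to TPM_BURST_TMO; return 0 on
 * timeout or if the sleep is interrupted.
 */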
static int
tpm_getburst(struct tpm_softc *sc)
{
	int burst, to, rv;

	to = tpm_tmotohz(TPM_BURST_TMO);

	while (to--) {
		/*
		 * Burst count is in bits 23:8, so read the two higher bytes.
		 */
		burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1);
		burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2)
		    << 8;

		if (burst)
			return burst;

		rv = tsleep(sc, PCATCH, "tpm_getburst", 1);
		if (rv && rv != EWOULDBLOCK) {
			return 0;
		}
	}

	return 0;
}

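/*
 * Read the handshake bits out of the status register.
 */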
static inline uint8_t
tpm_status(struct tpm_softc *sc)
{
	return bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) &
	    TPM_STS_STATUS_BITS;
}

/* -------------------------------------------------------------------------- */

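/*
 * Ask a TPM 1.2 device to save its state (TPM_ORD_SaveState) before suspend.
 */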
static bool
tpm12_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[10] = {
		0x00, 0xC1,		/* TPM_TAG_RQU_COMMAND */
		0x00, 0x00, 0x00, 10,	/* Length in bytes */
		0x00, 0x00, 0x00, 0x98	/* TPM_ORD_SaveState */
	};
	struct tpm_header response;

	if ((*sc->sc_intf->write)(sc, &command, sizeof(command)) != 0)
		return false;
	if ((*sc->sc_intf->read)(sc, &response, sizeof(response), NULL, 0) != 0)
		return false;
	if (TPM_BE32(response.code) != 0)
		return false;

	return true;
}

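/*
 * Ask a TPM 2.0 device to save its state (TPM_CC_Shutdown with TPM_SU_STATE)
 * before suspend.
 */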
static bool
tpm20_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[12] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS */
		0x00, 0x00, 0x00, 12,	/* Length in bytes */
		0x00, 0x00, 0x01, 0x45,	/* TPM_CC_Shutdown */
		0x00, 0x01		/* TPM_SU_STATE */
	};
	struct tpm_header response;

	if ((*sc->sc_intf->write)(sc, &command, sizeof(command)) != 0)
		return false;
	if ((*sc->sc_intf->read)(sc, &response, sizeof(response), NULL, 0) != 0)
		return false;
	if (TPM_BE32(response.code) != 0)
		return false;

	return true;
}

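/*
 * pmf(9) suspend handler: have the TPM save its state so that the firmware
 * can restore it on resume.
 */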
bool
tpm_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct tpm_softc *sc = device_private(dev);

	switch (sc->sc_ver) {
	case TPM_1_2:
		return tpm12_suspend(sc);
	case TPM_2_0:
		return tpm20_suspend(sc);
	default:
		panic("%s: impossible", __func__);
	}
}

bool
tpm_resume(device_t dev, const pmf_qual_t *qual)
{
	/*
	 * Don't do anything, the BIOS is supposed to restore the previously
	 * saved state.
	 */
	return true;
}

/* -------------------------------------------------------------------------- */

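/*
 * Poll until all bits in 'mask' are set in the status register, or the
 * timeout (in ticks) expires. Returns 0 unless the sleep is interrupted;
 * callers must check sc_status to see whether the bits actually showed up.
 */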
static int
tpm_poll(struct tpm_softc *sc, uint8_t mask, int to, wchan_t chan)
{
	int rv;

	while (((sc->sc_status = tpm_status(sc)) & mask) != mask && to--) {
		rv = tsleep(chan, PCATCH, "tpm_poll", 1);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	return 0;
}

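/*
 * Wait for all of 'bits' to be set in the status register, handling
 * TPM_STS_VALID first. If the bits do not show up and TPM_STS_VALID was
 * requested, ask the TPM to resend with TPM_STS_RESP_RETRY up to three
 * times; otherwise fail with EIO.
 */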
static int
tpm_waitfor(struct tpm_softc *sc, uint8_t bits, int tmo, wchan_t chan)
{
	int retry, to, rv;
	uint8_t todo;

	to = tpm_tmotohz(tmo);
	retry = 3;

restart:
	todo = bits;

	/*
	 * TPM_STS_VALID has priority over the others.
	 */
	if (todo & TPM_STS_VALID) {
		if ((rv = tpm_poll(sc, TPM_STS_VALID, to+1, chan)) != 0)
			return rv;
		todo &= ~TPM_STS_VALID;
	}

	if ((rv = tpm_poll(sc, todo, to, chan)) != 0)
		return rv;

	if ((todo & sc->sc_status) != todo) {
		if ((retry-- > 0) && (bits & TPM_STS_VALID)) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
			    TPM_STS_RESP_RETRY);
			goto restart;
		}
		return EIO;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

/*
 * TPM using the TIS 1.2 interface.
 */

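/*
 * Request exclusive use of locality 'l'. Only locality 0 is supported; wait
 * for the TPM to report the locality both valid and active.
 */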
static int
tpm12_request_locality(struct tpm_softc *sc, int l)
{
	uint32_t r;
	int to, rv;

	if (l != 0)
		return EINVAL;

	if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) ==
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
		return 0;

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
	    TPM_ACCESS_REQUEST_USE);

	to = tpm_tmotohz(TPM_ACCESS_TMO);

	while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) {
		rv = tsleep(sc->sc_intf->init, PCATCH, "tpm_locality", 1);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return EBUSY;
	}

	return 0;
}

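/*
 * Probe for a TIS 1.2 interface: check the capability bits, claim locality 0
 * by busy-waiting, and make sure the device ID register reads back sane.
 */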
static int
tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh)
{
	uint32_t cap;
	uint8_t reg;
	int tmo;

	cap = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITY);
	if (cap == 0xffffffff)
		return EINVAL;
	if ((cap & TPM_CAPS_REQUIRED) != TPM_CAPS_REQUIRED)
		return ENOTSUP;

	/* Request locality 0. */
	bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE);

	/* Wait for it to become active. */
	tmo = TPM_ACCESS_TMO;	/* Milliseconds. */
	while ((reg = bus_space_read_1(bt, bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && tmo--) {
		DELAY(1000);	/* 1 millisecond. */
	}
	if ((reg & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return ETIMEDOUT;
	}

	if (bus_space_read_4(bt, bh, TPM_ID) == 0xffffffff)
		return EINVAL;

	return 0;
}

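/*
 * Fetch random bytes from a TPM 1.2 device and add them to the entropy pool.
 */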
static int
tpm12_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * TPM Specification Version 1.2, Main Part 3: Commands,
	 * Sec. 13.6 TPM_GetRandom
	 */
	struct {
		struct tpm_header hdr;
		uint32_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint32_t randomBytesSize;
		uint8_t bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command. */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM_TAG_RQU_COMMAND);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM_ORD_GetRandom);
	command.bytesRequested = htobe32(sizeof(response.bytes));

	/* Write the command. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
	    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible. */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
		    nread, sizeof(response.hdr));
		rv = EIO;
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM_TAG_RSP_COMMAND ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later. Otherwise, give up.
		 */
		if (code & TPM_NON_FATAL) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy, code=%u\n",
			    __func__, code & ~TPM_NON_FATAL);
			rv = 0;
		} else if (code == TPM_DEACTIVATED) {
			device_printf(sc->sc_dev, "tpm is deactivated\n");
			rv = ENXIO;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=%u\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload. */
	if ((rv = (*sc->sc_intf->read)(sc,
	    (char *)&response + nread, pktlen - nread,
	    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible. */
	nbytes = be32toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool. Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing. */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

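/*
 * Fetch random bytes from a TPM 2.0 device and add them to the entropy pool.
 */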
static int
tpm20_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * Trusted Platform Module Library, Family "2.0", Level 00
	 * Revision 01.38, Part 3: Commands, Sec. 16.1 `TPM2_GetRandom'
	 *
	 * https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-3-Commands-01.38.pdf#page=133
	 */
	struct {
		struct tpm_header hdr;
		uint16_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint16_t randomBytesSize;
		uint8_t bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command. */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM2_ST_NO_SESSIONS);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM2_CC_GetRandom);
	command.bytesRequested = htobe16(sizeof(response.bytes));

	/* Write the command. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
	    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible. */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
		    nread, sizeof(response.hdr));
		rv = EIO;
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM2_ST_NO_SESSIONS ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later. Otherwise, give up.
		 */
		if (code & TPM2_RC_WARN) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy,"
			    " code=TPM_RC_WARN+0x%x\n",
			    __func__, code & ~TPM2_RC_WARN);
			rv = 0;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=0x%x\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload. */
	if ((rv = (*sc->sc_intf->read)(sc,
	    (char *)&response + nread, pktlen - nread,
	    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible. */
	nbytes = be16toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool. Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing. */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

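/*
 * Workqueue handler: satisfy a pending entropy request by issuing GetRandom
 * commands until enough bytes have been gathered or a command fails.
 */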
static void
tpm_rng_work(struct work *wk, void *cookie)
{
	struct tpm_softc *sc = cookie;
	unsigned nbytes, entropybits;
	bool busy;
	int rv = 0;

	/* Acknowledge the request. */
	nbytes = atomic_swap_uint(&sc->sc_rndpending, 0);

	/* Lock userland out of the tpm, or fail if it's already open. */
	mutex_enter(&sc->sc_lock);
	busy = sc->sc_busy;
	sc->sc_busy = true;
	mutex_exit(&sc->sc_lock);
	if (busy) {	/* tough */
		aprint_debug_dev(sc->sc_dev, "%s: device in use\n", __func__);
		return;
	}

	/*
	 * Issue as many commands as needed to fulfill the request, but
	 * stop if anything fails.
	 */
	for (; nbytes; nbytes -= MIN(nbytes, MAX(1, entropybits/NBBY))) {
		switch (sc->sc_ver) {
		case TPM_1_2:
			rv = tpm12_rng(sc, &entropybits);
			break;
		case TPM_2_0:
			rv = tpm20_rng(sc, &entropybits);
			break;
		default:
			panic("bad tpm version: %d", sc->sc_ver);
		}
		if (rv)
			break;
	}

	/*
	 * If the tpm is busted, no sense in trying again -- most
	 * likely, it is deactivated, and by the spec it cannot be
	 * reactivated until after a reboot.
	 */
	if (rv) {
		device_printf(sc->sc_dev, "deactivating entropy source\n");
		rnd_detach_source(&sc->sc_rnd);
		/* XXX worker thread can't workqueue_destroy its own queue */
	}

	/* Relinquish the tpm back to userland. */
	mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_busy);
	sc->sc_busy = false;
	mutex_exit(&sc->sc_lock);
}

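/*
 * rndsource(9) callback: note how many bytes are wanted and kick the worker,
 * unless a request is already pending.
 */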
static void
tpm_rng_get(size_t nbytes, void *cookie)
{
	struct tpm_softc *sc = cookie;

	if (atomic_swap_uint(&sc->sc_rndpending, MIN(nbytes, UINT_MAX/NBBY))
	    == 0)
		workqueue_enqueue(sc->sc_rndwq, &sc->sc_rndwk, NULL);
}

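/*
 * Initialize the TIS 1.2 interface: cache the capabilities, device ID and
 * revision, claim locality 0, and attach the random source.
 */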
static int
tpm_tis12_init(struct tpm_softc *sc)
{
	int rv;

	sc->sc_caps = bus_space_read_4(sc->sc_bt, sc->sc_bh,
	    TPM_INTF_CAPABILITY);
	sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID);
	sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV);

	aprint_normal_dev(sc->sc_dev, "device 0x%08x rev 0x%x\n",
	    sc->sc_devid, sc->sc_rev);

	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	/* Abort whatever it thought it was doing. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);

	/* XXX Run this at higher priority? */
	if ((rv = workqueue_create(&sc->sc_rndwq, device_xname(sc->sc_dev),
	    tpm_rng_work, sc, PRI_NONE, IPL_VM, WQ_MPSAFE)) != 0)
		return rv;
	rndsource_setcb(&sc->sc_rnd, tpm_rng_get, sc);
	rnd_attach_source(&sc->sc_rnd, device_xname(sc->sc_dev),
	    RND_TYPE_RNG,
	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|RND_FLAG_HASCB);

	return 0;
}

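/*
 * Prepare the TPM for a transfer: for reads, wait for data to become
 * available; for writes, claim locality 0 and put the device in the
 * command-ready state.
 */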
static int
tpm_tis12_start(struct tpm_softc *sc, int rw)
{
	int rv;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		return rv;
	}

	/* Request the 0th locality. */
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	sc->sc_status = tpm_status(sc);
	if (sc->sc_status & TPM_STS_CMD_READY)
		return 0;

	/* Abort previous and restart. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
	rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO,
	    sc->sc_intf->write);
	if (rv)
		return rv;

	return 0;
}

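/*
 * Read 'len' bytes from the data FIFO, honoring the burst count. Unless
 * TPM_PARAM_SIZE is set, stop once at least the 6-byte tag and length of the
 * header have been read.
 */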
static int
tpm_tis12_read(struct tpm_softc *sc, void *buf, size_t len, size_t *count,
    int flags)
{
	uint8_t *p = buf;
	size_t cnt;
	int rv, n;

	cnt = 0;
	while (len > 0) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		if (rv)
			return rv;

		n = MIN(len, tpm_getburst(sc));
		while (n > 0) {
			*p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_DATA);
			cnt++;
			len--;
			n--;
		}

		if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6)
			break;
	}

	if (count)
		*count = cnt;

	return 0;
}

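/*
 * Write 'len' bytes to the data FIFO in burst-sized chunks, checking along
 * the way that the TPM still expects data, and verify that it expects no
 * more once the last byte has been written.
 */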
static int
tpm_tis12_write(struct tpm_softc *sc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	size_t cnt;
	int rv, r;

	if (len == 0)
		return 0;
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	cnt = 0;
	while (cnt < len - 1) {
		for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
			cnt++;
		}
		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
			return rv;
		}
		sc->sc_status = tpm_status(sc);
		if (!(sc->sc_status & TPM_STS_DATA_EXPECT)) {
			return EIO;
		}
	}

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
	cnt++;

	if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
		return rv;
	}
	if ((sc->sc_status & TPM_STS_DATA_EXPECT) != 0) {
		return EIO;
	}

	return 0;
}

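/*
 * Finish a transfer. On reads, verify no data is left over, return the
 * device to the ready state and release locality 0. On writes, verify the
 * TPM expects no more data and either start execution (TPM_STS_GO) or abort
 * on error.
 */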
static int
tpm_tis12_end(struct tpm_softc *sc, int rw, int err)
{
	int rv = 0;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO,
		    sc->sc_intf->read);
		if (rv)
			return rv;

		/* Still more data? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_AVAIL)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    TPM_STS_CMD_READY);

		/* Release the 0th locality. */
		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
		    TPM_ACCESS_ACTIVE_LOCALITY);
	} else {
		/* Hungry for more? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_EXPECT)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    err ? TPM_STS_CMD_READY : TPM_STS_GO);
	}

	return rv;
}

const struct tpm_intf tpm_intf_tis12 = {
	.version = TIS_1_2,
	.probe = tpm_tis12_probe,
	.init = tpm_tis12_init,
	.start = tpm_tis12_start,
	.read = tpm_tis12_read,
	.write = tpm_tis12_write,
	.end = tpm_tis12_end
};

/* -------------------------------------------------------------------------- */

static dev_type_open(tpmopen);
static dev_type_close(tpmclose);
static dev_type_read(tpmread);
static dev_type_write(tpmwrite);
static dev_type_ioctl(tpmioctl);

const struct cdevsw tpm_cdevsw = {
	.d_open = tpmopen,
	.d_close = tpmclose,
	.d_read = tpmread,
	.d_write = tpmwrite,
	.d_ioctl = tpmioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};

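/*
 * Open the tpm device, enforcing exclusive access.
 */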
static int
tpmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_busy) {
		ret = EBUSY;
	} else {
		sc->sc_busy = true;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

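/*
 * Close the tpm device and mark it free again.
 */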
static int
tpmclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (!sc->sc_busy) {
		ret = EINVAL;
	} else {
		sc->sc_busy = false;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

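/*
 * Read a complete response from the TPM: fetch the header to learn the total
 * length, then copy the header and the payload out to userland.
 */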
static int
tpmread(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_header hdr;
	uint8_t buf[TPM_BUFSIZ];
	size_t cnt, len, n;
	int rv;

	if (sc == NULL)
		return ENXIO;

	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)))
		return rv;

	/* Get the header. */
	if ((rv = (*sc->sc_intf->read)(sc, &hdr, sizeof(hdr), &cnt, 0))) {
		goto out;
	}
	len = TPM_BE32(hdr.length);
	if (len > uio->uio_resid || len < cnt) {
		rv = EIO;
		goto out;
	}

	/* Copy out the header. */
	if ((rv = uiomove(&hdr, cnt, uio))) {
		goto out;
	}

	/* Process the rest. */
	len -= cnt;
	while (len > 0) {
		n = MIN(sizeof(buf), len);
		if ((rv = (*sc->sc_intf->read)(sc, buf, n, NULL,
		    TPM_PARAM_SIZE))) {
			goto out;
		}
		if ((rv = uiomove(buf, n, uio))) {
			goto out;
		}
		len -= n;
	}

out:
	rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	return rv;
}

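/*
 * Copy a command from userland (at most TPM_BUFSIZ bytes) and hand it to the
 * interface.
 */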
static int
tpmwrite(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	uint8_t buf[TPM_BUFSIZ];
	int n, rv;

	if (sc == NULL)
		return ENXIO;

	n = MIN(sizeof(buf), uio->uio_resid);
	if ((rv = uiomove(buf, n, uio))) {
		goto out;
	}
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE))) {
		goto out;
	}
	if ((rv = (*sc->sc_intf->write)(sc, buf, n))) {
		goto out;
	}

	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);
out:
	return rv;
}

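/*
 * Handle TPM_IOC_GETINFO, which reports version and device identification
 * information to userland.
 */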
static int
tpmioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_ioc_getinfo *info;

	if (sc == NULL)
		return ENXIO;

	switch (cmd) {
	case TPM_IOC_GETINFO:
		info = addr;
		info->api_version = TPM_API_VERSION;
		info->tpm_version = sc->sc_ver;
		info->itf_version = sc->sc_intf->version;
		info->device_id = sc->sc_devid;
		info->device_rev = sc->sc_rev;
		info->device_caps = sc->sc_caps;
		return 0;
	default:
		break;
	}

	return ENOTTY;
}