/*	$NetBSD: tpm.c,v 1.13.2.2 2022/08/03 16:00:47 martin Exp $	*/

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008, 2009 Michael Shalayeff
 * Copyright (c) 2009, 2010 Hans-Joerg Hoexer
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tpm.c,v 1.13.2.2 2022/08/03 16:00:47 martin Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pmf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <dev/ic/tpmreg.h>
#include <dev/ic/tpmvar.h>

#include "ioconf.h"

CTASSERT(sizeof(struct tpm_header) == 10);

#define TPM_BUFSIZ	1024

#define TPM_PARAM_SIZE	0x0001	/* that's a flag */

/* Timeouts. */
#define TPM_ACCESS_TMO	2000	/* 2sec */
#define TPM_READY_TMO	2000	/* 2sec */
#define TPM_READ_TMO	2000	/* 2sec */
#define TPM_BURST_TMO	2000	/* 2sec */

#define TPM_CAPS_REQUIRED \
	(TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT| \
	 TPM_INTF_INT_LEVEL_LOW)

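/*
 * Convert a timeout in milliseconds to a count of clock ticks.
 */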
static inline int
tpm_tmotohz(int tmo)
{
	struct timeval tv;

	tv.tv_sec = tmo / 1000;
	tv.tv_usec = 1000 * (tmo % 1000);

	return tvtohz(&tv);
}

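/*
 * Read the burst count from the STS register: the number of bytes the
 * device can accept or return without further handshaking.  Poll for up
 * to TPM_BURST_TMO milliseconds; return 0 on timeout or signal.
 */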
static int
tpm_getburst(struct tpm_softc *sc)
{
	int burst, to, rv;

	to = tpm_tmotohz(TPM_BURST_TMO);

	while (to--) {
		/*
		 * Burst count is in bits 23:8, so read the two higher bytes.
		 */
		burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1);
		burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2)
		    << 8;

		if (burst)
			return burst;

		rv = tsleep(sc, PCATCH, "tpm_getburst", 1);
		if (rv && rv != EWOULDBLOCK) {
			return 0;
		}
	}

	return 0;
}

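/*
 * Read the status byte, masked to the status bits of interest.
 */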
static inline uint8_t
tpm_status(struct tpm_softc *sc)
{
	return bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) &
	    TPM_STS_STATUS_BITS;
}

/* -------------------------------------------------------------------------- */

static bool
tpm12_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[10] = {
		0x00, 0xC1,		/* TPM_TAG_RQU_COMMAND */
		0x00, 0x00, 0x00, 10,	/* Length in bytes */
		0x00, 0x00, 0x00, 0x98	/* TPM_ORD_SaveState */
	};
	struct tpm_header response;
	size_t nread;
	bool endwrite = false, endread = false;
	int error;

	/*
	 * Write the command.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
	if (error) {
		device_printf(sc->sc_dev, "start write failed: %d", error);
		goto out;
	}

	endwrite = true;

	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
	if (error) {
		device_printf(sc->sc_dev, "write TPM_ORD_SaveState failed: %d",
		    error);
		goto out;
	}

	endwrite = false;

	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	if (error) {
		device_printf(sc->sc_dev, "end write failed: %d", error);
		goto out;
	}

	/*
	 * Read the response -- just the header; we don't expect a
	 * payload.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_READ);
	if (error) {
		device_printf(sc->sc_dev, "start read failed: %d", error);
		goto out;
	}

	endread = true;

	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
	    0);
	if (error) {
		device_printf(sc->sc_dev, "read failed: %d", error);
		goto out;
	}
	if (nread != sizeof(response)) {
		device_printf(sc->sc_dev, "short header read: %zu", nread);
		goto out;
	}

	endread = false;

	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
	if (error) {
		device_printf(sc->sc_dev, "end read failed: %d", error);
		goto out;
	}

	/*
	 * Verify the response looks reasonable.
	 */
	if (be16toh(response.tag) != TPM_TAG_RSP_COMMAND ||
	    be32toh(response.length) != sizeof(response) ||
	    be32toh(response.code) != 0) {
		device_printf(sc->sc_dev,
		    "TPM_ORD_SaveState failed: tag=0x%x length=0x%x code=0x%x",
		    be16toh(response.tag),
		    be32toh(response.length),
		    be32toh(response.code));
		error = EIO;
		goto out;
	}

	/* Success! */
	error = 0;

out:	if (endwrite)
		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
	if (endread)
		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
	if (error)
		return false;
	return true;
}

static bool
tpm20_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[12] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS */
		0x00, 0x00, 0x00, 12,	/* Length in bytes */
		0x00, 0x00, 0x01, 0x45,	/* TPM_CC_Shutdown */
		0x00, 0x01		/* TPM_SU_STATE */
	};
	struct tpm_header response;
	size_t nread;
	bool endwrite = false, endread = false;
	int error;

	/*
	 * Write the command.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
	if (error) {
		device_printf(sc->sc_dev, "start write failed: %d", error);
		goto out;
	}

	endwrite = true;

	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
	if (error) {
		device_printf(sc->sc_dev, "write TPM_CC_Shutdown failed: %d",
		    error);
		goto out;
	}

	endwrite = false;

	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	if (error) {
		device_printf(sc->sc_dev, "end write failed: %d", error);
		goto out;
	}

	/*
	 * Read the response -- just the header; we don't expect a
	 * payload.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_READ);
	if (error) {
		device_printf(sc->sc_dev, "start read failed: %d", error);
		goto out;
	}

	endread = true;

	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
	    0);
	if (error) {
		device_printf(sc->sc_dev, "read failed: %d", error);
		goto out;
	}
	if (nread != sizeof(response)) {
		device_printf(sc->sc_dev, "short header read: %zu", nread);
		goto out;
	}

	endread = false;

	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
	if (error) {
		device_printf(sc->sc_dev, "end read failed: %d", error);
		goto out;
	}

	/*
	 * Verify the response looks reasonable.
	 */
	if (be16toh(response.tag) != TPM2_ST_NO_SESSIONS ||
	    be32toh(response.length) != sizeof(response) ||
	    be32toh(response.code) != TPM2_RC_SUCCESS) {
		device_printf(sc->sc_dev,
		    "TPM_CC_Shutdown failed: tag=0x%x length=0x%x code=0x%x",
		    be16toh(response.tag),
		    be32toh(response.length),
		    be32toh(response.code));
		error = EIO;
		goto out;
	}

	/* Success! */
	error = 0;

out:	if (endwrite)
		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
	if (endread)
		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
	if (error)
		return false;
	return true;
}

bool
tpm_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct tpm_softc *sc = device_private(dev);

	switch (sc->sc_ver) {
	case TPM_1_2:
		return tpm12_suspend(sc);
	case TPM_2_0:
		return tpm20_suspend(sc);
	default:
		panic("%s: impossible", __func__);
	}
}

bool
tpm_resume(device_t dev, const pmf_qual_t *qual)
{
	/*
	 * Don't do anything, the BIOS is supposed to restore the previously
	 * saved state.
	 */
	return true;
}

/* -------------------------------------------------------------------------- */

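/*
 * Poll until all bits in 'mask' are set in the status register or 'to'
 * ticks have elapsed.  The last status read is left in sc->sc_status for
 * the caller to inspect; a timeout by itself is not an error here.
 */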
static int
tpm_poll(struct tpm_softc *sc, uint8_t mask, int to, wchan_t chan)
{
	int rv;

	while (((sc->sc_status = tpm_status(sc)) & mask) != mask && to--) {
		rv = tsleep(chan, PCATCH, "tpm_poll", 1);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	return 0;
}

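/*
 * Wait for the status bits in 'bits' to become set, giving TPM_STS_VALID
 * priority over the others.  If the bits still aren't set and
 * TPM_STS_VALID was requested, retry up to three times with
 * TPM_STS_RESP_RETRY before giving up with EIO.
 */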
static int
tpm_waitfor(struct tpm_softc *sc, uint8_t bits, int tmo, wchan_t chan)
{
	int retry, to, rv;
	uint8_t todo;

	to = tpm_tmotohz(tmo);
	retry = 3;

restart:
	todo = bits;

	/*
	 * TPM_STS_VALID has priority over the others.
	 */
	if (todo & TPM_STS_VALID) {
		if ((rv = tpm_poll(sc, TPM_STS_VALID, to+1, chan)) != 0)
			return rv;
		todo &= ~TPM_STS_VALID;
	}

	if ((rv = tpm_poll(sc, todo, to, chan)) != 0)
		return rv;

	if ((todo & sc->sc_status) != todo) {
		if ((retry-- > 0) && (bits & TPM_STS_VALID)) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
			    TPM_STS_RESP_RETRY);
			goto restart;
		}
		return EIO;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

/*
 * TPM using the TIS 1.2 interface.
 */

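/*
 * Request and wait for locality 0 to become active.  Only locality 0 is
 * supported.
 */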
static int
tpm12_request_locality(struct tpm_softc *sc, int l)
{
	uint32_t r;
	int to, rv;

	if (l != 0)
		return EINVAL;

	if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) ==
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
		return 0;

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
	    TPM_ACCESS_REQUEST_USE);

	to = tpm_tmotohz(TPM_ACCESS_TMO);

	while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) {
		rv = tsleep(sc->sc_intf->init, PCATCH, "tpm_locality", 1);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return EBUSY;
	}

	return 0;
}

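/*
 * Probe for a TIS 1.2 interface: check the interface capabilities, claim
 * locality 0 and make sure the device ID register reads back something
 * sensible.
 */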
static int
tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh)
{
	uint32_t cap;
	uint8_t reg;
	int tmo;

	cap = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITY);
	if (cap == 0xffffffff)
		return EINVAL;
	if ((cap & TPM_CAPS_REQUIRED) != TPM_CAPS_REQUIRED)
		return ENOTSUP;

	/* Request locality 0. */
	bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE);

	/* Wait for it to become active. */
	tmo = TPM_ACCESS_TMO; /* Milliseconds. */
	while ((reg = bus_space_read_1(bt, bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && tmo--) {
		DELAY(1000); /* 1 millisecond. */
	}
	if ((reg & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return ETIMEDOUT;
	}

	if (bus_space_read_4(bt, bh, TPM_ID) == 0xffffffff)
		return EINVAL;

	return 0;
}

static int
tpm12_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * TPM Specification Version 1.2, Main Part 3: Commands,
	 * Sec. 13.6 TPM_GetRandom
	 */
	struct {
		struct tpm_header hdr;
		uint32_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint32_t randomBytesSize;
		uint8_t bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command. */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM_TAG_RQU_COMMAND);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM_ORD_GetRandom);
	command.bytesRequested = htobe32(sizeof(response.bytes));

	/* Write the command. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
	    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible. */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu",
		    nread, sizeof(response.hdr));
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM_TAG_RSP_COMMAND ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later.  Otherwise, give up.
		 */
		if (code & TPM_NON_FATAL) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy, code=%u\n",
			    __func__, code & ~TPM_NON_FATAL);
			rv = 0;
		} else if (code == TPM_DEACTIVATED) {
			device_printf(sc->sc_dev, "tpm is deactivated\n");
			rv = ENXIO;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=%u\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload. */
	if ((rv = (*sc->sc_intf->read)(sc,
	    (char *)&response + nread, pktlen - nread,
	    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible. */
	nbytes = be32toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool.  Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing. */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

static int
tpm20_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * Trusted Platform Module Library, Family "2.0", Level 00
	 * Revision 01.38, Part 3: Commands, Sec. 16.1 `TPM2_GetRandom'
	 *
	 * https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-3-Commands-01.38.pdf#page=133
	 */
	struct {
		struct tpm_header hdr;
		uint16_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint16_t randomBytesSize;
		uint8_t bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command. */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM2_ST_NO_SESSIONS);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM2_CC_GetRandom);
	command.bytesRequested = htobe16(sizeof(response.bytes));

	/* Write the command. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
	    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible. */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu",
		    nread, sizeof(response.hdr));
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM2_ST_NO_SESSIONS ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later.  Otherwise, give up.
		 */
		if (code & TPM2_RC_WARN) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy,"
			    " code=TPM_RC_WARN+0x%x\n",
			    __func__, code & ~TPM2_RC_WARN);
			rv = 0;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=0x%x\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload. */
	if ((rv = (*sc->sc_intf->read)(sc,
	    (char *)&response + nread, pktlen - nread,
	    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible. */
	nbytes = be16toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool.  Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing. */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

static void
tpm_rng_work(struct work *wk, void *cookie)
{
	struct tpm_softc *sc = cookie;
	unsigned nbytes, entropybits;
	int rv;

	/* Acknowledge the request. */
	nbytes = atomic_swap_uint(&sc->sc_rndpending, 0);

	/* Lock the tpm while we do I/O transactions with it. */
	mutex_enter(&sc->sc_lock);

	/*
	 * Issue as many commands as needed to fulfill the request, but
	 * stop if anything fails.
	 */
	for (; nbytes; nbytes -= MIN(nbytes, MAX(1, entropybits/NBBY))) {
		switch (sc->sc_ver) {
		case TPM_1_2:
			rv = tpm12_rng(sc, &entropybits);
			break;
		case TPM_2_0:
			rv = tpm20_rng(sc, &entropybits);
			break;
		default:
			panic("bad tpm version: %d", sc->sc_ver);
		}
		if (rv)
			break;
	}

	/*
	 * If the tpm is busted, no sense in trying again -- most
	 * likely, it is deactivated, and by the spec it cannot be
	 * reactivated until after a reboot.
	 */
	if (rv) {
		device_printf(sc->sc_dev, "deactivating entropy source\n");
		atomic_store_relaxed(&sc->sc_rnddisabled, true);
		/* XXX worker thread can't workqueue_destroy its own queue */
	}

	/* Relinquish the tpm. */
	mutex_exit(&sc->sc_lock);
}

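/*
 * Rndsource callback: record how many bytes are wanted and schedule the
 * worker to fetch them, unless a request is already pending or the
 * entropy source has been disabled.
 */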
static void
tpm_rng_get(size_t nbytes, void *cookie)
{
	struct tpm_softc *sc = cookie;

	if (atomic_load_relaxed(&sc->sc_rnddisabled))
		return;		/* tough */
	if (atomic_swap_uint(&sc->sc_rndpending, MIN(nbytes, UINT_MAX/NBBY))
	    == 0)
		workqueue_enqueue(sc->sc_rndwq, &sc->sc_rndwk, NULL);
}

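/*
 * Attach-time initialization for the TIS 1.2 interface: read the
 * capability, device ID and revision registers, claim locality 0, and
 * attach the random number source.
 */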
static int
tpm_tis12_init(struct tpm_softc *sc)
{
	int rv;

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_caps = bus_space_read_4(sc->sc_bt, sc->sc_bh,
	    TPM_INTF_CAPABILITY);
	sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID);
	sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV);

	aprint_normal_dev(sc->sc_dev, "device 0x%08x rev 0x%x\n",
	    sc->sc_devid, sc->sc_rev);

	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	/* Abort whatever it thought it was doing. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);

	/* XXX Run this at higher priority? */
	if ((rv = workqueue_create(&sc->sc_rndwq, device_xname(sc->sc_dev),
	    tpm_rng_work, sc, PRI_NONE, IPL_VM, WQ_MPSAFE)) != 0)
		return rv;
	rndsource_setcb(&sc->sc_rnd, tpm_rng_get, sc);
	rnd_attach_source(&sc->sc_rnd, device_xname(sc->sc_dev),
	    RND_TYPE_RNG,
	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|RND_FLAG_HASCB);

	return 0;
}

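/*
 * Begin a transfer.  For reads, wait until the device signals that data
 * is available; for writes, claim locality 0 and put the device in the
 * command-ready state.
 */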
static int
tpm_tis12_start(struct tpm_softc *sc, int rw)
{
	int rv;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		return rv;
	}

	/* Request the 0th locality. */
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	sc->sc_status = tpm_status(sc);
	if (sc->sc_status & TPM_STS_CMD_READY)
		return 0;

	/* Abort previous and restart. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
	rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO,
	    sc->sc_intf->write);
	if (rv)
		return rv;

	return 0;
}

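/*
 * Read 'len' bytes from the data FIFO, burst by burst.  Unless
 * TPM_PARAM_SIZE is set, stop after the 6-byte tag and length so the
 * caller can parse the header first.
 */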
static int
tpm_tis12_read(struct tpm_softc *sc, void *buf, size_t len, size_t *count,
    int flags)
{
	uint8_t *p = buf;
	size_t cnt;
	int rv, n;

	cnt = 0;
	while (len > 0) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		if (rv)
			return rv;

		n = MIN(len, tpm_getburst(sc));
		while (n > 0) {
			*p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_DATA);
			cnt++;
			len--;
			n--;
		}

		if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6)
			break;
	}

	if (count)
		*count = cnt;

	return 0;
}

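/*
 * Write 'len' bytes to the data FIFO, burst by burst, holding back the
 * last byte so that TPM_STS_DATA_EXPECT can be checked both before and
 * after it is written.
 */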
static int
tpm_tis12_write(struct tpm_softc *sc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	size_t cnt;
	int rv, r;

	if (len == 0)
		return 0;
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	cnt = 0;
	while (cnt < len - 1) {
		for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
			cnt++;
		}
		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
			return rv;
		}
		sc->sc_status = tpm_status(sc);
		if (!(sc->sc_status & TPM_STS_DATA_EXPECT)) {
			return EIO;
		}
	}

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
	cnt++;

	if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
		return rv;
	}
	if ((sc->sc_status & TPM_STS_DATA_EXPECT) != 0) {
		return EIO;
	}

	return 0;
}

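/*
 * Finish a transfer.  For reads, check that no data is left over, reset
 * the device and release locality 0; for writes, check that the device
 * isn't expecting more data and tell it to execute the command (or abort
 * on error).
 */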
static int
tpm_tis12_end(struct tpm_softc *sc, int rw, int err)
{
	int rv = 0;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO,
		    sc->sc_intf->read);
		if (rv)
			goto out;

		/* Still more data? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_AVAIL)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    TPM_STS_CMD_READY);

		/* Release the 0th locality. */
		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
		    TPM_ACCESS_ACTIVE_LOCALITY);
	} else {
		/* Hungry for more? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_EXPECT)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    err ? TPM_STS_CMD_READY : TPM_STS_GO);
	}

out:	return err ? err : rv;
}

const struct tpm_intf tpm_intf_tis12 = {
	.version = TIS_1_2,
	.probe = tpm_tis12_probe,
	.init = tpm_tis12_init,
	.start = tpm_tis12_start,
	.read = tpm_tis12_read,
	.write = tpm_tis12_write,
	.end = tpm_tis12_end
};

/* -------------------------------------------------------------------------- */

static dev_type_open(tpmopen);
static dev_type_close(tpmclose);
static dev_type_read(tpmread);
static dev_type_write(tpmwrite);
static dev_type_ioctl(tpmioctl);

const struct cdevsw tpm_cdevsw = {
	.d_open = tpmopen,
	.d_close = tpmclose,
	.d_read = tpmread,
	.d_write = tpmwrite,
	.d_ioctl = tpmioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};

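/*
 * Userland interface (/dev/tpm): open is exclusive; a write submits one
 * command and a subsequent read collects one response.
 */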
static int
tpmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_busy) {
		ret = EBUSY;
	} else {
		sc->sc_busy = true;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

static int
tpmclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (!sc->sc_busy) {
		ret = EINVAL;
	} else {
		sc->sc_busy = false;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

static int
tpmread(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_header hdr;
	uint8_t buf[TPM_BUFSIZ];
	size_t cnt, len = 0/*XXXGCC*/;
	bool end = false;
	int rv;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);

	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)))
		goto out;
	end = true;

	/* Get the header. */
	if ((rv = (*sc->sc_intf->read)(sc, &hdr, sizeof(hdr), &cnt, 0))) {
		goto out;
	}
	if (cnt != sizeof(hdr)) {
		rv = EIO;
		goto out;
	}
	len = be32toh(hdr.length);
	if (len > MIN(sizeof(buf), uio->uio_resid) || len < sizeof(hdr)) {
		rv = EIO;
		goto out;
	}

	/* Get the payload. */
	len -= sizeof(hdr);
	if ((rv = (*sc->sc_intf->read)(sc, buf, len, NULL, TPM_PARAM_SIZE))) {
		goto out;
	}

out:	if (end)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);

	mutex_exit(&sc->sc_lock);

	/* If anything went wrong, stop here -- nothing to copy out. */
	if (rv)
		return rv;

	/* Copy out the header. */
	if ((rv = uiomove(&hdr, sizeof(hdr), uio))) {
		return rv;
	}

	/* Copy out the payload. */
	if ((rv = uiomove(buf, len, uio))) {
		return rv;
	}

	/* Success! */
	return 0;
}

static int
tpmwrite(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	uint8_t buf[TPM_BUFSIZ];
	bool end = false;
	int n, rv;

	if (sc == NULL)
		return ENXIO;

	n = MIN(sizeof(buf), uio->uio_resid);
	if ((rv = uiomove(buf, n, uio))) {
		return rv;
	}

	mutex_enter(&sc->sc_lock);

	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE))) {
		goto out;
	}
	end = true;

	if ((rv = (*sc->sc_intf->write)(sc, buf, n))) {
		goto out;
	}

out:	if (end)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	mutex_exit(&sc->sc_lock);
	return rv;
}

static int
tpmioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_ioc_getinfo *info;

	if (sc == NULL)
		return ENXIO;

	switch (cmd) {
	case TPM_IOC_GETINFO:
		info = addr;
		info->api_version = TPM_API_VERSION;
		info->tpm_version = sc->sc_ver;
		info->itf_version = sc->sc_intf->version;
		info->device_id = sc->sc_devid;
		info->device_rev = sc->sc_rev;
		info->device_caps = sc->sc_caps;
		return 0;
	default:
		break;
	}

	return ENOTTY;
}