/* $NetBSD: tpm.c,v 1.29 2024/05/14 13:41:15 riastradh Exp $ */

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008, 2009 Michael Shalayeff
 * Copyright (c) 2009, 2010 Hans-Joerg Hoexer
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tpm.c,v 1.29 2024/05/14 13:41:15 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/pmf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <dev/ic/tpmreg.h>
#include <dev/ic/tpmvar.h>

#include "ioconf.h"

CTASSERT(sizeof(struct tpm_header) == 10);

#define TPM_BUFSIZ	1024

#define TPM_PARAM_SIZE	0x0001	/* read flag: fetch the full payload, not just the header */

/* Timeouts. */
#define TPM_ACCESS_TMO	2000	/* 2sec */
#define TPM_READY_TMO	2000	/* 2sec */
#define TPM_READ_TMO	2000	/* 2sec */
#define TPM_BURST_TMO	2000	/* 2sec */

#define TPM_CAPS_REQUIRED \
	(TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT| \
	 TPM_INTF_INT_LEVEL_LOW)

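/*
 * Convert a timeout in milliseconds to a count of hardclock ticks.
 */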
static inline int
tpm_tmotohz(int tmo)
{
	struct timeval tv;

	tv.tv_sec = tmo / 1000;
	tv.tv_usec = 1000 * (tmo % 1000);

	return tvtohz(&tv);
}

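/*
 * Read the burst count out of TPM_STS (bits 23:8): the number of bytes
 * that can currently be transferred through the data FIFO.  Poll until
 * it is nonzero; return 0 on timeout or if interrupted.
 */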
static int
tpm_getburst(struct tpm_softc *sc)
{
	int burst, to, rv;

	to = tpm_tmotohz(TPM_BURST_TMO);

	while (to--) {
		/*
		 * Burst count is in bits 23:8, so read the two higher bytes.
		 */
		burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1);
		burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2)
		    << 8;

		if (burst)
			return burst;

		rv = kpause("tpm_getburst", /*intr*/true, /*timo*/1,
		    /*lock*/NULL);
		if (rv && rv != EWOULDBLOCK) {
			return 0;
		}
	}

	return 0;
}

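/*
 * Return the status bits of the TPM_STS register.
 */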
static inline uint8_t
tpm_status(struct tpm_softc *sc)
{
	return bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) &
	    TPM_STS_STATUS_BITS;
}

/* -------------------------------------------------------------------------- */

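/*
 * Save the TPM 1.2 state before suspend by issuing TPM_ORD_SaveState, so
 * the firmware can restore it on resume.
 */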
static bool
tpm12_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[10] = {
		0x00, 0xC1,		/* TPM_TAG_RQU_COMMAND */
		0x00, 0x00, 0x00, 10,	/* Length in bytes */
		0x00, 0x00, 0x00, 0x98	/* TPM_ORD_SaveState */
	};
	struct tpm_header response;
	size_t nread;
	bool endwrite = false, endread = false;
	int error;

	/*
	 * Write the command.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
	if (error) {
		device_printf(sc->sc_dev, "start write failed: %d\n", error);
		goto out;
	}

	endwrite = true;

	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
	if (error) {
		device_printf(sc->sc_dev, "write TPM_ORD_SaveState failed:"
		    " %d\n", error);
		goto out;
	}

	endwrite = false;

	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	if (error) {
		device_printf(sc->sc_dev, "end write failed: %d\n", error);
		goto out;
	}

	/*
	 * Read the response -- just the header; we don't expect a
	 * payload.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_READ);
	if (error) {
		device_printf(sc->sc_dev, "start read failed: %d\n", error);
		goto out;
	}

	endread = true;

	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
	    0);
	if (error) {
		device_printf(sc->sc_dev, "read failed: %d\n", error);
		goto out;
	}
	if (nread != sizeof(response)) {
		device_printf(sc->sc_dev, "short header read: %zu\n", nread);
		error = EIO;
		goto out;
	}

	endread = false;

	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
	if (error) {
		device_printf(sc->sc_dev, "end read failed: %d\n", error);
		goto out;
	}

	/*
	 * Verify the response looks reasonable.
	 */
	if (be16toh(response.tag) != TPM_TAG_RSP_COMMAND ||
	    be32toh(response.length) != sizeof(response) ||
	    be32toh(response.code) != 0) {
		device_printf(sc->sc_dev,
		    "TPM_ORD_SaveState failed:"
		    " tag=0x%x length=0x%x code=0x%x\n",
		    be16toh(response.tag),
		    be32toh(response.length),
		    be32toh(response.code));
		error = EIO;
		goto out;
	}

	/* Success! */
	error = 0;

out:	if (endwrite)
		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
	if (endread)
		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
	if (error)
		return false;
	return true;
}

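/*
 * Save the TPM 2.0 state before suspend by issuing TPM_CC_Shutdown with
 * TPM_SU_STATE, so the firmware can restore it on resume.
 */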
static bool
tpm20_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[12] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS */
		0x00, 0x00, 0x00, 12,	/* Length in bytes */
		0x00, 0x00, 0x01, 0x45,	/* TPM_CC_Shutdown */
		0x00, 0x01		/* TPM_SU_STATE */
	};
	struct tpm_header response;
	size_t nread;
	bool endwrite = false, endread = false;
	int error;

	/*
	 * Write the command.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
	if (error) {
		device_printf(sc->sc_dev, "start write failed: %d\n", error);
		goto out;
	}

	endwrite = true;

	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
	if (error) {
		device_printf(sc->sc_dev, "write TPM_CC_Shutdown failed:"
		    " %d\n", error);
		goto out;
	}

	endwrite = false;

	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	if (error) {
		device_printf(sc->sc_dev, "end write failed: %d\n", error);
		goto out;
	}

	/*
	 * Read the response -- just the header; we don't expect a
	 * payload.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_READ);
	if (error) {
		device_printf(sc->sc_dev, "start read failed: %d\n", error);
		goto out;
	}

	endread = true;

	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
	    0);
	if (error) {
		device_printf(sc->sc_dev, "read failed: %d\n", error);
		goto out;
	}
	if (nread != sizeof(response)) {
		device_printf(sc->sc_dev, "short header read: %zu\n", nread);
		error = EIO;
		goto out;
	}

	endread = false;

	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
	if (error) {
		device_printf(sc->sc_dev, "end read failed: %d\n", error);
		goto out;
	}

	/*
	 * Verify the response looks reasonable.
	 */
	if (be16toh(response.tag) != TPM2_ST_NO_SESSIONS ||
	    be32toh(response.length) != sizeof(response) ||
	    be32toh(response.code) != TPM2_RC_SUCCESS) {
		device_printf(sc->sc_dev,
		    "TPM_CC_Shutdown failed: tag=0x%x length=0x%x code=0x%x\n",
		    be16toh(response.tag),
		    be32toh(response.length),
		    be32toh(response.code));
		error = EIO;
		goto out;
	}

	/* Success! */
	error = 0;

out:	if (endwrite)
		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
	if (endread)
		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
	if (error)
		return false;
	return true;
}

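/*
 * pmf suspend handler: have the TPM save its state, according to the
 * version of the device.
 */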
bool
tpm_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct tpm_softc *sc = device_private(dev);

	switch (sc->sc_ver) {
	case TPM_1_2:
		return tpm12_suspend(sc);
	case TPM_2_0:
		return tpm20_suspend(sc);
	default:
		panic("%s: impossible", __func__);
	}
}

bool
tpm_resume(device_t dev, const pmf_qual_t *qual)
{
	/*
	 * Don't do anything, the BIOS is supposed to restore the previously
	 * saved state.
	 */
	return true;
}

/* -------------------------------------------------------------------------- */

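/*
 * Poll the status register, sleeping one tick between reads, until all
 * bits in `mask' are set or `to' ticks have elapsed.
 */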
static int
tpm_poll(struct tpm_softc *sc, uint8_t mask, int to, wchan_t chan)
{
	int rv;

	while (((sc->sc_status = tpm_status(sc)) & mask) != mask && to--) {
		rv = kpause("tpm_poll", /*intr*/true, /*timo*/1, /*lock*/NULL);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	return 0;
}

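/*
 * Wait up to `tmo' milliseconds for the status bits in `bits' to become
 * set, polling TPM_STS_VALID first if it was requested.  Retry a few
 * times with TPM_STS_RESP_RETRY (when TPM_STS_VALID was requested)
 * before giving up with EIO.
 */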
static int
tpm_waitfor(struct tpm_softc *sc, uint8_t bits, int tmo, wchan_t chan)
{
	int retry, to, rv;
	uint8_t todo;

	to = tpm_tmotohz(tmo);
	retry = 3;

restart:
	todo = bits;

	/*
	 * TPM_STS_VALID has priority over the others.
	 */
	if (todo & TPM_STS_VALID) {
		if ((rv = tpm_poll(sc, TPM_STS_VALID, to+1, chan)) != 0)
			return rv;
		todo &= ~TPM_STS_VALID;
	}

	if ((rv = tpm_poll(sc, todo, to, chan)) != 0)
		return rv;

	if ((todo & sc->sc_status) != todo) {
		if ((retry-- > 0) && (bits & TPM_STS_VALID)) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
			    TPM_STS_RESP_RETRY);
			goto restart;
		}
		return EIO;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

/*
 * TPM using the TIS 1.2 interface.
 */

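/*
 * Request use of the given locality -- only locality 0 is supported --
 * and wait for the TPM to grant it.
 */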
static int
tpm12_request_locality(struct tpm_softc *sc, int l)
{
	uint32_t r;
	int to, rv;

	if (l != 0)
		return EINVAL;

	if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) ==
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
		return 0;

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
	    TPM_ACCESS_REQUEST_USE);

	to = tpm_tmotohz(TPM_ACCESS_TMO);

	while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) {
		rv = kpause("tpm_locality", /*intr*/true, /*timo*/1,
		    /*lock*/NULL);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return EBUSY;
	}

	return 0;
}

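/*
 * Probe for a TIS 1.2 interface: verify the required interface
 * capabilities, claim locality 0 by busy-waiting, and check that the
 * device ID register reads back sanely.
 */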
static int
tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh)
{
	uint32_t cap;
	uint8_t reg;
	int tmo;

	cap = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITY);
	if (cap == 0xffffffff)
		return EINVAL;
	if ((cap & TPM_CAPS_REQUIRED) != TPM_CAPS_REQUIRED)
		return ENOTSUP;

	/* Request locality 0. */
	bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE);

	/* Wait for it to become active. */
	tmo = TPM_ACCESS_TMO;	/* Milliseconds. */
	while ((reg = bus_space_read_1(bt, bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && tmo--) {
		DELAY(1000);	/* 1 millisecond. */
	}
	if ((reg & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return ETIMEDOUT;
	}

	if (bus_space_read_4(bt, bh, TPM_ID) == 0xffffffff)
		return EINVAL;

	return 0;
}

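/*
 * Gather random bytes from a TPM 1.2 device with TPM_GetRandom and
 * enter them into the entropy pool.  *entropybitsp is set to the number
 * of bits of entropy credited.
 */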
static int
tpm12_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * TPM Specification Version 1.2, Main Part 3: Commands,
	 * Sec. 13.6 TPM_GetRandom
	 */
	struct {
		struct tpm_header hdr;
		uint32_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint32_t randomBytesSize;
		uint8_t bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command. */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM_TAG_RQU_COMMAND);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM_ORD_GetRandom);
	command.bytesRequested = htobe32(sizeof(response.bytes));

	/* Write the command. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
	    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible. */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
		    nread, sizeof(response.hdr));
		rv = EIO;
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM_TAG_RSP_COMMAND ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later.  Otherwise, give up.
		 */
		if (code & TPM_NON_FATAL) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy, code=%u\n",
			    __func__, code & ~TPM_NON_FATAL);
			rv = 0;
		} else if (code == TPM_DEACTIVATED) {
			device_printf(sc->sc_dev, "tpm is deactivated\n");
			rv = ENXIO;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=%u\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload. */
	if ((rv = (*sc->sc_intf->read)(sc,
	    (char *)&response + nread, pktlen - nread,
	    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible. */
	nbytes = be32toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool.  Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing. */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

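/*
 * Gather random bytes from a TPM 2.0 device with TPM2_GetRandom and
 * enter them into the entropy pool.  *entropybitsp is set to the number
 * of bits of entropy credited.
 */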
static int
tpm20_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * Trusted Platform Module Library, Family "2.0", Level 00
	 * Revision 01.38, Part 3: Commands, Sec. 16.1 `TPM2_GetRandom'
	 *
	 * https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-3-Commands-01.38.pdf#page=133
	 */
	struct {
		struct tpm_header hdr;
		uint16_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint16_t randomBytesSize;
		uint8_t bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command. */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM2_ST_NO_SESSIONS);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM2_CC_GetRandom);
	command.bytesRequested = htobe16(sizeof(response.bytes));

	/* Write the command. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header. */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
	    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible. */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
		    nread, sizeof(response.hdr));
		rv = EIO;
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM2_ST_NO_SESSIONS ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later.  Otherwise, give up.
		 */
		if (code & TPM2_RC_WARN) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy,"
			    " code=TPM_RC_WARN+0x%x\n",
			    __func__, code & ~TPM2_RC_WARN);
			rv = 0;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=0x%x\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload. */
	if ((rv = (*sc->sc_intf->read)(sc,
	    (char *)&response + nread, pktlen - nread,
	    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible. */
	nbytes = be16toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool.  Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing. */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

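/*
 * Workqueue handler for entropy requests: take the number of bytes
 * requested, lock the TPM, and issue GetRandom commands until the
 * request is satisfied or a command fails.  If a command fails, disable
 * the entropy source -- the TPM is most likely deactivated.
 */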
static void
tpm_rng_work(struct work *wk, void *cookie)
{
	struct tpm_softc *sc = cookie;
	unsigned nbytes, entropybits;
	int rv = 0;

	/* Acknowledge the request. */
	nbytes = atomic_swap_uint(&sc->sc_rndpending, 0);

	/* Lock the tpm while we do I/O transactions with it. */
	mutex_enter(&sc->sc_lock);

	/*
	 * Issue as many commands as needed to fulfill the request, but
	 * stop if anything fails.
	 */
	for (; nbytes; nbytes -= MIN(nbytes, MAX(1, entropybits/NBBY))) {
		switch (sc->sc_ver) {
		case TPM_1_2:
			rv = tpm12_rng(sc, &entropybits);
			break;
		case TPM_2_0:
			rv = tpm20_rng(sc, &entropybits);
			break;
		default:
			panic("bad tpm version: %d", sc->sc_ver);
		}
		if (rv)
			break;
	}

	/*
	 * If the tpm is busted, no sense in trying again -- most
	 * likely, it is deactivated, and by the spec it cannot be
	 * reactivated until after a reboot.
	 */
	if (rv) {
		device_printf(sc->sc_dev, "deactivating entropy source\n");
		atomic_store_relaxed(&sc->sc_rnddisabled, true);
		/* XXX worker thread can't workqueue_destroy its own queue */
	}

	/* Relinquish the tpm. */
	mutex_exit(&sc->sc_lock);
}

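/*
 * rndsource callback: note how many bytes the entropy pool wants and
 * schedule the workqueue job unless a request is already pending.
 */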
static void
tpm_rng_get(size_t nbytes, void *cookie)
{
	struct tpm_softc *sc = cookie;

	if (atomic_load_relaxed(&sc->sc_rnddisabled))
		return;		/* tough */
	if (atomic_swap_uint(&sc->sc_rndpending, MIN(nbytes, UINT_MAX/NBBY))
	    == 0)
		workqueue_enqueue(sc->sc_rndwq, &sc->sc_rndwk, NULL);
}

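/*
 * Initialize a TIS 1.2 TPM: read the capabilities, device ID, and
 * revision, claim locality 0, reset the command state, and attach the
 * random number source.
 */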
static int
tpm_tis12_init(struct tpm_softc *sc)
{
	int rv;

	sc->sc_caps = bus_space_read_4(sc->sc_bt, sc->sc_bh,
	    TPM_INTF_CAPABILITY);
	sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID);
	sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV);

	aprint_normal_dev(sc->sc_dev, "device 0x%08x rev 0x%x\n",
	    sc->sc_devid, sc->sc_rev);

	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	/* Abort whatever it thought it was doing. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);

	/* XXX Run this at higher priority? */
	if ((rv = workqueue_create(&sc->sc_rndwq, device_xname(sc->sc_dev),
	    tpm_rng_work, sc, PRI_NONE, IPL_VM, WQ_MPSAFE)) != 0)
		return rv;
	rndsource_setcb(&sc->sc_rnd, tpm_rng_get, sc);
	rnd_attach_source(&sc->sc_rnd, device_xname(sc->sc_dev),
	    RND_TYPE_RNG,
	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|RND_FLAG_HASCB);

	return 0;
}

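/*
 * Begin a transaction.  For reads, wait until the TPM has data
 * available; for writes, claim locality 0 and make sure the TPM is in
 * the command-ready state.
 */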
static int
tpm_tis12_start(struct tpm_softc *sc, int rw)
{
	int rv;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		return rv;
	}

	/* Request the 0th locality. */
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	sc->sc_status = tpm_status(sc);
	if (sc->sc_status & TPM_STS_CMD_READY)
		return 0;

	/* Abort previous and restart. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
	rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO,
	    sc->sc_intf->write);
	if (rv)
		return rv;

	return 0;
}

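/*
 * Read up to `len' bytes from the data FIFO, one burst at a time.
 * Unless TPM_PARAM_SIZE is set in `flags', stop once at least 6 bytes
 * -- enough for the tag and length of a response header -- have been
 * read.  If `count' is not NULL, return the number of bytes read in it.
 */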
static int
tpm_tis12_read(struct tpm_softc *sc, void *buf, size_t len, size_t *count,
    int flags)
{
	uint8_t *p = buf;
	size_t cnt;
	int rv, n;

	cnt = 0;
	while (len > 0) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		if (rv)
			return rv;

		n = MIN(len, tpm_getburst(sc));
		while (n > 0) {
			*p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh,
			    TPM_DATA);
			cnt++;
			len--;
			n--;
		}

		if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6)
			break;
	}

	if (count)
		*count = cnt;

	return 0;
}

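/*
 * Write `len' bytes to the data FIFO, one burst at a time, holding the
 * last byte back until the TPM confirms it still expects data.  After
 * the final byte, verify that the TPM expects no more.
 */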
static int
tpm_tis12_write(struct tpm_softc *sc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	size_t cnt;
	int rv, r;

	if (len == 0)
		return 0;
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	cnt = 0;
	while (cnt < len - 1) {
		for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA,
			    *p++);
			cnt++;
		}
		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
			return rv;
		}
		sc->sc_status = tpm_status(sc);
		if (!(sc->sc_status & TPM_STS_DATA_EXPECT)) {
			return EIO;
		}
	}

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
	cnt++;

	if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
		return rv;
	}
	if ((sc->sc_status & TPM_STS_DATA_EXPECT) != 0) {
		return EIO;
	}

	return 0;
}

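/*
 * Finish a transaction.  For reads, check that no data is left over,
 * return the TPM to the command-ready state, and release locality 0.
 * For writes, check that the TPM accepted the whole command and tell it
 * to execute (TPM_STS_GO), or abort if an error occurred.
 */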
static int
tpm_tis12_end(struct tpm_softc *sc, int rw, int err)
{
	int rv = 0;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO,
		    sc->sc_intf->read);
		if (rv)
			goto out;

		/* Still more data? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_AVAIL)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    TPM_STS_CMD_READY);

		/* Release the 0th locality. */
		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
		    TPM_ACCESS_ACTIVE_LOCALITY);
	} else {
		/* Hungry for more? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_EXPECT)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    err ? TPM_STS_CMD_READY : TPM_STS_GO);
	}

out:	return err ? err : rv;
}

const struct tpm_intf tpm_intf_tis12 = {
	.version = TIS_1_2,
	.probe = tpm_tis12_probe,
	.init = tpm_tis12_init,
	.start = tpm_tis12_start,
	.read = tpm_tis12_read,
	.write = tpm_tis12_write,
	.end = tpm_tis12_end
};

/* -------------------------------------------------------------------------- */

static dev_type_open(tpmopen);
static dev_type_close(tpmclose);
static dev_type_read(tpmread);
static dev_type_write(tpmwrite);
static dev_type_ioctl(tpmioctl);

const struct cdevsw tpm_cdevsw = {
	.d_open = tpmopen,
	.d_close = tpmclose,
	.d_read = tpmread,
	.d_write = tpmwrite,
	.d_ioctl = tpmioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};

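/*
 * Allow only one process to open the device at a time.
 */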
static int
tpmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_busy) {
		ret = EBUSY;
	} else {
		sc->sc_busy = true;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

static int
tpmclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (!sc->sc_busy) {
		ret = EINVAL;
	} else {
		sc->sc_busy = false;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

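/*
 * Read a response from the TPM: fetch the header, then the payload, and
 * copy both out to userland.
 */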
static int
tpmread(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_header hdr;
	uint8_t buf[TPM_BUFSIZ];
	size_t cnt, len = 0/*XXXGCC*/;
	bool end = false;
	int rv;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);

	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)))
		goto out;
	end = true;

	/* Get the header. */
	if ((rv = (*sc->sc_intf->read)(sc, &hdr, sizeof(hdr), &cnt, 0))) {
		goto out;
	}
	if (cnt != sizeof(hdr)) {
		rv = EIO;
		goto out;
	}
	len = be32toh(hdr.length);
	if (len > MIN(sizeof(buf), uio->uio_resid) || len < sizeof(hdr)) {
		rv = EIO;
		goto out;
	}

	/* Get the payload. */
	len -= sizeof(hdr);
	if ((rv = (*sc->sc_intf->read)(sc, buf, len, NULL, TPM_PARAM_SIZE))) {
		goto out;
	}

out:	if (end)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);

	mutex_exit(&sc->sc_lock);

	/* If anything went wrong, stop here -- nothing to copy out. */
	if (rv)
		return rv;

	/* Copy out the header. */
	if ((rv = uiomove(&hdr, sizeof(hdr), uio))) {
		return rv;
	}

	/* Copy out the payload. */
	if ((rv = uiomove(buf, len, uio))) {
		return rv;
	}

	/* Success! */
	return 0;
}

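/*
 * Write a command to the TPM: copy it in from userland and hand it to
 * the interface as a single transaction.
 */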
static int
tpmwrite(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	uint8_t buf[TPM_BUFSIZ];
	bool end = false;
	int n, rv;

	if (sc == NULL)
		return ENXIO;

	n = MIN(sizeof(buf), uio->uio_resid);
	if ((rv = uiomove(buf, n, uio))) {
		return rv;
	}

	mutex_enter(&sc->sc_lock);

	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE))) {
		goto out;
	}
	end = true;

	if ((rv = (*sc->sc_intf->write)(sc, buf, n))) {
		goto out;
	}

out:	if (end)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	mutex_exit(&sc->sc_lock);
	return rv;
}

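/*
 * TPM_IOC_GETINFO returns the API version, TPM version, interface
 * version, device ID, revision, and capabilities.
 */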
static int
tpmioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_ioc_getinfo *info;

	if (sc == NULL)
		return ENXIO;

	switch (cmd) {
	case TPM_IOC_GETINFO:
		info = addr;
		info->api_version = TPM_API_VERSION;
		info->tpm_version = sc->sc_ver;
		info->itf_version = sc->sc_intf->version;
		info->device_id = sc->sc_devid;
		info->device_rev = sc->sc_rev;
		info->device_caps = sc->sc_caps;
		return 0;
	default:
		break;
	}

	return ENOTTY;
}