1 /*	$NetBSD: tpm.c,v 1.28 2023/07/04 01:02:26 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Maxime Villard.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 2008, 2009 Michael Shalayeff
34  * Copyright (c) 2009, 2010 Hans-Joerg Hoexer
35  * All rights reserved.
36  *
37  * Permission to use, copy, modify, and distribute this software for any
38  * purpose with or without fee is hereby granted, provided that the above
39  * copyright notice and this permission notice appear in all copies.
40  *
41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45  * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
46  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
47  * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48  */
49 
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: tpm.c,v 1.28 2023/07/04 01:02:26 riastradh Exp $");
52 
53 #include <sys/param.h>
54 #include <sys/types.h>
55 
56 #include <sys/atomic.h>
57 #include <sys/bus.h>
58 #include <sys/conf.h>
59 #include <sys/device.h>
60 #include <sys/kernel.h>
61 #include <sys/pmf.h>
62 #include <sys/proc.h>
63 #include <sys/systm.h>
64 #include <sys/workqueue.h>
65 
66 #include <dev/ic/tpmreg.h>
67 #include <dev/ic/tpmvar.h>
68 
69 #include "ioconf.h"
70 
71 CTASSERT(sizeof(struct tpm_header) == 10);
72 
73 #define TPM_BUFSIZ	1024
74 
75 #define TPM_PARAM_SIZE	0x0001	/* flag for the read/write routines, not a register */
76 
77 /* Timeouts. */
78 #define TPM_ACCESS_TMO	2000	/* 2sec */
79 #define TPM_READY_TMO	2000	/* 2sec */
80 #define TPM_READ_TMO	2000	/* 2sec */
81 #define TPM_BURST_TMO	2000	/* 2sec */
82 
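/*
 * Interface capabilities that tpm_tis12_probe() insists on.  The driver
 * itself polls and never installs an interrupt handler, so these bits
 * serve only as a probe-time sanity check that a TIS 1.2-style interface
 * is present.
 */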
83 #define TPM_CAPS_REQUIRED \
84 	(TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT| \
85 	 TPM_INTF_INT_LEVEL_LOW)
86 
87 static inline int
88 tpm_tmotohz(int tmo)
89 {
90 	struct timeval tv;
91 
92 	tv.tv_sec = tmo / 1000;
93 	tv.tv_usec = 1000 * (tmo % 1000);
94 
95 	return tvtohz(&tv);
96 }
97 
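/*
 * Return the burst count from TPM_STS: the number of bytes that can be
 * transferred through TPM_DATA before the status register has to be
 * polled again.  Returns 0 on timeout.
 */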
98 static int
99 tpm_getburst(struct tpm_softc *sc)
100 {
101 	int burst, to, rv;
102 
103 	to = tpm_tmotohz(TPM_BURST_TMO);
104 
105 	while (to--) {
106 		/*
107 		 * Burst count is in bits 23:8, so read the two higher bytes.
108 		 */
109 		burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1);
110 		burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2)
111 		    << 8;
112 
113 		if (burst)
114 			return burst;
115 
116 		rv = kpause("tpm_getburst", /*intr*/true, /*timo*/1,
117 		    /*lock*/NULL);
118 		if (rv && rv != EWOULDBLOCK) {
119 			return 0;
120 		}
121 	}
122 
123 	return 0;
124 }
125 
126 static inline uint8_t
127 tpm_status(struct tpm_softc *sc)
128 {
129 	return bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) &
130 	    TPM_STS_STATUS_BITS;
131 }
132 
133 /* -------------------------------------------------------------------------- */
134 
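/*
 * Tell a TPM 1.2 device to save its volatile state (TPM_ORD_SaveState)
 * before suspend.  The firmware is expected to issue the matching
 * TPM_Startup(ST_STATE) on resume to restore it.
 */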
135 static bool
136 tpm12_suspend(struct tpm_softc *sc)
137 {
138 	static const uint8_t command[10] = {
139 		0x00, 0xC1,		/* TPM_TAG_RQU_COMMAND */
140 		0x00, 0x00, 0x00, 10,	/* Length in bytes */
141 		0x00, 0x00, 0x00, 0x98	/* TPM_ORD_SaveState */
142 	};
143 	struct tpm_header response;
144 	size_t nread;
145 	bool endwrite = false, endread = false;
146 	int error;
147 
148 	/*
149 	 * Write the command.
150 	 */
151 	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
152 	if (error) {
153 		device_printf(sc->sc_dev, "start write failed: %d\n", error);
154 		goto out;
155 	}
156 
157 	endwrite = true;
158 
159 	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
160 	if (error) {
161 		device_printf(sc->sc_dev,
162 		    "write TPM_ORD_SaveState failed: %d\n", error);
163 		goto out;
164 	}
165 
166 	endwrite = false;
167 
168 	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
169 	if (error) {
170 		device_printf(sc->sc_dev, "end write failed: %d\n", error);
171 		goto out;
172 	}
173 
174 	/*
175 	 * Read the response -- just the header; we don't expect a
176 	 * payload.
177 	 */
178 	error = (*sc->sc_intf->start)(sc, UIO_READ);
179 	if (error) {
180 		device_printf(sc->sc_dev, "start read failed: %d\n", error);
181 		goto out;
182 	}
183 
184 	endread = true;
185 
186 	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
187 	    0);
188 	if (error) {
189 		device_printf(sc->sc_dev, "read failed: %d\n", error);
190 		goto out;
191 	}
192 	if (nread != sizeof(response)) {
193 		device_printf(sc->sc_dev, "short header read: %zu\n", nread);
		error = EIO;
194 		goto out;
195 	}
196 
197 	endread = false;
198 
199 	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
200 	if (error) {
201 		device_printf(sc->sc_dev, "end read failed: %d\n", error);
202 		goto out;
203 	}
204 
205 	/*
206 	 * Verify the response looks reasonable.
207 	 */
208 	if (be16toh(response.tag) != TPM_TAG_RSP_COMMAND ||
209 	    be32toh(response.length) != sizeof(response) ||
210 	    be32toh(response.code) != 0) {
211 		device_printf(sc->sc_dev,
212 		    "TPM_ORD_SaveState failed: tag=0x%x length=0x%x code=0x%x\n",
213 		    be16toh(response.tag),
214 		    be32toh(response.length),
215 		    be32toh(response.code));
216 		error = EIO;
217 		goto out;
218 	}
219 
220 	/* Success!  */
221 	error = 0;
222 
223 out:	if (endwrite)
224 		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
225 	if (endread)
226 		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
227 	if (error)
228 		return false;
229 	return true;
230 }
231 
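/*
 * Same idea for TPM 2.0: issue TPM2_Shutdown(TPM_SU_STATE) so the TPM
 * preserves its state across the sleep; the firmware is expected to
 * perform the matching TPM2_Startup(TPM_SU_STATE) on resume.
 */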
232 static bool
233 tpm20_suspend(struct tpm_softc *sc)
234 {
235 	static const uint8_t command[12] = {
236 		0x80, 0x01,		/* TPM_ST_NO_SESSIONS */
237 		0x00, 0x00, 0x00, 12,	/* Length in bytes */
238 		0x00, 0x00, 0x01, 0x45,	/* TPM_CC_Shutdown */
239 		0x00, 0x01		/* TPM_SU_STATE */
240 	};
241 	struct tpm_header response;
242 	size_t nread;
243 	bool endwrite = false, endread = false;
244 	int error;
245 
246 	/*
247 	 * Write the command.
248 	 */
249 	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
250 	if (error) {
251 		device_printf(sc->sc_dev, "start write failed: %d\n", error);
252 		goto out;
253 	}
254 
255 	endwrite = true;
256 
257 	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
258 	if (error) {
259 		device_printf(sc->sc_dev,
260 		    "write TPM_CC_Shutdown failed: %d\n", error);
261 		goto out;
262 	}
263 
264 	endwrite = false;
265 
266 	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
267 	if (error) {
268 		device_printf(sc->sc_dev, "end write failed: %d\n", error);
269 		goto out;
270 	}
271 
272 	/*
273 	 * Read the response -- just the header; we don't expect a
274 	 * payload.
275 	 */
276 	error = (*sc->sc_intf->start)(sc, UIO_READ);
277 	if (error) {
278 		device_printf(sc->sc_dev, "start read failed: %d\n", error);
279 		goto out;
280 	}
281 
282 	endread = true;
283 
284 	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
285 	    0);
286 	if (error) {
287 		device_printf(sc->sc_dev, "read failed: %d\n", error);
288 		goto out;
289 	}
290 	if (nread != sizeof(response)) {
291 		device_printf(sc->sc_dev, "short header read: %zu\n", nread);
		error = EIO;
292 		goto out;
293 	}
294 
295 	endread = false;
296 
297 	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
298 	if (error) {
299 		device_printf(sc->sc_dev, "end read failed: %d\n", error);
300 		goto out;
301 	}
302 
303 	/*
304 	 * Verify the response looks reasonable.
305 	 */
306 	if (be16toh(response.tag) != TPM2_ST_NO_SESSIONS ||
307 	    be32toh(response.length) != sizeof(response) ||
308 	    be32toh(response.code) != TPM2_RC_SUCCESS) {
309 		device_printf(sc->sc_dev,
310 		    "TPM_CC_Shutdown failed: tag=0x%x length=0x%x code=0x%x\n",
311 		    be16toh(response.tag),
312 		    be32toh(response.length),
313 		    be32toh(response.code));
314 		error = EIO;
315 		goto out;
316 	}
317 
318 	/* Success!  */
319 	error = 0;
320 
321 out:	if (endwrite)
322 		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
323 	if (endread)
324 		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
325 	if (error)
326 		return false;
327 	return true;
328 }
329 
330 bool
331 tpm_suspend(device_t dev, const pmf_qual_t *qual)
332 {
333 	struct tpm_softc *sc = device_private(dev);
334 
335 	switch (sc->sc_ver) {
336 	case TPM_1_2:
337 		return tpm12_suspend(sc);
338 	case TPM_2_0:
339 		return tpm20_suspend(sc);
340 	default:
341 		panic("%s: impossible", __func__);
342 	}
343 }
344 
345 bool
346 tpm_resume(device_t dev, const pmf_qual_t *qual)
347 {
348 	/*
349 	 * Nothing to do here: the firmware (BIOS) is expected to restore
350 	 * the state saved at suspend time.
351 	 */
352 	return true;
353 }
354 
355 /* -------------------------------------------------------------------------- */
356 
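/*
 * Poll the status register until all bits in `mask' are set or the
 * timeout (in ticks) expires.  The last status read is left in
 * sc->sc_status; callers check it to see whether the bits actually came
 * up, since timing out is not treated as an error here.
 */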
357 static int
358 tpm_poll(struct tpm_softc *sc, uint8_t mask, int to, wchan_t chan)
359 {
360 	int rv;
361 
362 	while (((sc->sc_status = tpm_status(sc)) & mask) != mask && to--) {
363 		rv = kpause("tpm_poll", /*intr*/true, /*timo*/1, /*lock*/NULL);
364 		if (rv && rv != EWOULDBLOCK) {
365 			return rv;
366 		}
367 	}
368 
369 	return 0;
370 }
371 
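/*
 * Wait for the given status bits.  TPM_STS_VALID is waited for first,
 * since the other bits are only meaningful once it is set.  If the bits
 * still do not come up, write TPM_STS_RESP_RETRY to make the TPM resend
 * its response, and retry up to three times.
 */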
372 static int
373 tpm_waitfor(struct tpm_softc *sc, uint8_t bits, int tmo, wchan_t chan)
374 {
375 	int retry, to, rv;
376 	uint8_t todo;
377 
378 	to = tpm_tmotohz(tmo);
379 	retry = 3;
380 
381 restart:
382 	todo = bits;
383 
384 	/*
385 	 * TPM_STS_VALID has priority over the others.
386 	 */
387 	if (todo & TPM_STS_VALID) {
388 		if ((rv = tpm_poll(sc, TPM_STS_VALID, to+1, chan)) != 0)
389 			return rv;
390 		todo &= ~TPM_STS_VALID;
391 	}
392 
393 	if ((rv = tpm_poll(sc, todo, to, chan)) != 0)
394 		return rv;
395 
396 	if ((todo & sc->sc_status) != todo) {
397 		if ((retry-- > 0) && (bits & TPM_STS_VALID)) {
398 			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
399 			    TPM_STS_RESP_RETRY);
400 			goto restart;
401 		}
402 		return EIO;
403 	}
404 
405 	return 0;
406 }
407 
408 /* -------------------------------------------------------------------------- */
409 
410 /*
411  * TPM using the TIS 1.2 interface.
412  */
413 
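/*
 * Claim a locality (only locality 0 is supported): write
 * TPM_ACCESS_REQUEST_USE and wait for the TPM to report the locality
 * both valid and active.
 */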
414 static int
415 tpm12_request_locality(struct tpm_softc *sc, int l)
416 {
417 	uint32_t r;
418 	int to, rv;
419 
420 	if (l != 0)
421 		return EINVAL;
422 
423 	if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
424 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) ==
425 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
426 		return 0;
427 
428 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
429 	    TPM_ACCESS_REQUEST_USE);
430 
431 	to = tpm_tmotohz(TPM_ACCESS_TMO);
432 
433 	while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
434 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
435 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) {
436 		rv = kpause("tpm_locality", /*intr*/true, /*timo*/1,
437 		    /*lock*/NULL);
438 		if (rv && rv != EWOULDBLOCK) {
439 			return rv;
440 		}
441 	}
442 
443 	if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
444 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
445 		return EBUSY;
446 	}
447 
448 	return 0;
449 }
450 
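/*
 * Probe for a TIS 1.2 interface: the capability register must read back
 * sane and advertise the required bits, locality 0 must be obtainable,
 * and the device/vendor ID register must not read as all-ones.
 */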
451 static int
452 tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh)
453 {
454 	uint32_t cap;
455 	uint8_t reg;
456 	int tmo;
457 
458 	cap = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITY);
459 	if (cap == 0xffffffff)
460 		return EINVAL;
461 	if ((cap & TPM_CAPS_REQUIRED) != TPM_CAPS_REQUIRED)
462 		return ENOTSUP;
463 
464 	/* Request locality 0. */
465 	bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE);
466 
467 	/* Wait for it to become active. */
468 	tmo = TPM_ACCESS_TMO; /* Milliseconds. */
469 	while ((reg = bus_space_read_1(bt, bh, TPM_ACCESS) &
470 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
471 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && tmo--) {
472 		DELAY(1000); /* 1 millisecond. */
473 	}
474 	if ((reg & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
475 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
476 		return ETIMEDOUT;
477 	}
478 
479 	if (bus_space_read_4(bt, bh, TPM_ID) == 0xffffffff)
480 		return EINVAL;
481 
482 	return 0;
483 }
484 
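/*
 * Issue one TPM 1.2 TPM_GetRandom command and feed the result into the
 * entropy pool.  *entropybitsp is set to the (conservative) entropy
 * estimate credited for the data.
 */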
485 static int
486 tpm12_rng(struct tpm_softc *sc, unsigned *entropybitsp)
487 {
488 	/*
489 	 * TPM Specification Version 1.2, Main Part 3: Commands,
490 	 * Sec. 13.6 TPM_GetRandom
491 	 */
492 	struct {
493 		struct tpm_header hdr;
494 		uint32_t bytesRequested;
495 	} __packed command;
496 	struct response {
497 		struct tpm_header hdr;
498 		uint32_t randomBytesSize;
499 		uint8_t	bytes[64];
500 	} __packed response;
501 	bool endwrite = false, endread = false;
502 	size_t nread;
503 	uint16_t tag;
504 	uint32_t pktlen, code, nbytes, entropybits = 0;
505 	int rv;
506 
507 	/* Encode the command.  */
508 	memset(&command, 0, sizeof(command));
509 	command.hdr.tag = htobe16(TPM_TAG_RQU_COMMAND);
510 	command.hdr.length = htobe32(sizeof(command));
511 	command.hdr.code = htobe32(TPM_ORD_GetRandom);
512 	command.bytesRequested = htobe32(sizeof(response.bytes));
513 
514 	/* Write the command.   */
515 	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
516 		device_printf(sc->sc_dev, "start write failed, error=%d\n",
517 		    rv);
518 		goto out;
519 	}
520 	endwrite = true;
521 	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
522 		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
523 		goto out;
524 	}
525 	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
526 	endwrite = false;
527 	if (rv) {
528 		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
529 		goto out;
530 	}
531 
532 	/* Read the response header.  */
533 	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
534 		device_printf(sc->sc_dev, "start read failed, error=%d\n",
535 		    rv);
536 		goto out;
537 	}
538 	endread = true;
539 	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
540 		    &nread, 0)) != 0) {
541 		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
542 		goto out;
543 	}
544 
545 	/* Verify the response header looks sensible.  */
546 	if (nread != sizeof(response.hdr)) {
547 		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
548 		    nread, sizeof(response.hdr));
		rv = EIO;
549 		goto out;
550 	}
551 	tag = be16toh(response.hdr.tag);
552 	pktlen = be32toh(response.hdr.length);
553 	code = be32toh(response.hdr.code);
554 	if (tag != TPM_TAG_RSP_COMMAND ||
555 	    pktlen < offsetof(struct response, bytes) ||
556 	    pktlen > sizeof(response) ||
557 	    code != 0) {
558 		/*
559 		 * If the tpm itself is busy (e.g., it has yet to run a
560 		 * self-test, or it's in a timeout period to defend
561 		 * against brute force attacks), then we can try again
562 		 * later.  Otherwise, give up.
563 		 */
564 		if (code & TPM_NON_FATAL) {
565 			aprint_debug_dev(sc->sc_dev, "%s: tpm busy, code=%u\n",
566 			    __func__, code & ~TPM_NON_FATAL);
567 			rv = 0;
568 		} else if (code == TPM_DEACTIVATED) {
569 			device_printf(sc->sc_dev, "tpm is deactivated\n");
570 			rv = ENXIO;
571 		} else {
572 			device_printf(sc->sc_dev, "bad tpm response:"
573 			    " tag=%u len=%u code=%u\n", tag, pktlen, code);
574 			hexdump(aprint_debug, "tpm response header",
575 			    (const void *)&response.hdr,
576 			    sizeof(response.hdr));
577 			rv = EIO;
578 		}
579 		goto out;
580 	}
581 
582 	/* Read the response payload.  */
583 	if ((rv = (*sc->sc_intf->read)(sc,
584 		    (char *)&response + nread, pktlen - nread,
585 		    NULL, TPM_PARAM_SIZE)) != 0) {
586 		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
587 		goto out;
588 	}
589 	endread = false;
590 	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
591 		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
592 		goto out;
593 	}
594 
595 	/* Verify the number of bytes read looks sensible.  */
596 	nbytes = be32toh(response.randomBytesSize);
597 	if (nbytes > pktlen - offsetof(struct response, bytes)) {
598 		device_printf(sc->sc_dev, "overlong GetRandom length:"
599 		    " %u, max %zu\n",
600 		    nbytes, pktlen - offsetof(struct response, bytes));
601 		nbytes = pktlen - offsetof(struct response, bytes);
602 	}
603 
604 	/*
605 	 * Enter the data into the entropy pool.  Conservatively (or,
606 	 * perhaps, cargocultily) estimate half a bit of entropy per
607 	 * bit of data.
608 	 */
609 	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
610 	entropybits = (NBBY/2)*nbytes;
611 	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);
612 
613 out:	/* End the read or write if still ongoing.  */
614 	if (endread)
615 		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
616 	if (endwrite)
617 		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);
618 
619 	*entropybitsp = entropybits;
620 	return rv;
621 }
622 
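/*
 * TPM 2.0 flavour of the above: TPM2_CC_GetRandom, with 16-bit request
 * and response byte counts instead of 32-bit ones.
 */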
623 static int
624 tpm20_rng(struct tpm_softc *sc, unsigned *entropybitsp)
625 {
626 	/*
627 	 * Trusted Platform Module Library, Family "2.0", Level 00
628 	 * Revision 01.38, Part 3: Commands, Sec. 16.1 `TPM2_GetRandom'
629 	 *
630 	 * https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-3-Commands-01.38.pdf#page=133
631 	 */
632 	struct {
633 		struct tpm_header hdr;
634 		uint16_t bytesRequested;
635 	} __packed command;
636 	struct response {
637 		struct tpm_header hdr;
638 		uint16_t randomBytesSize;
639 		uint8_t bytes[64];
640 	} __packed response;
641 	bool endwrite = false, endread = false;
642 	size_t nread;
643 	uint16_t tag;
644 	uint32_t pktlen, code, nbytes, entropybits = 0;
645 	int rv;
646 
647 	/* Encode the command.  */
648 	memset(&command, 0, sizeof(command));
649 	command.hdr.tag = htobe16(TPM2_ST_NO_SESSIONS);
650 	command.hdr.length = htobe32(sizeof(command));
651 	command.hdr.code = htobe32(TPM2_CC_GetRandom);
652 	command.bytesRequested = htobe16(sizeof(response.bytes));
653 
654 	/* Write the command.   */
655 	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
656 		device_printf(sc->sc_dev, "start write failed, error=%d\n",
657 		    rv);
658 		goto out;
659 	}
660 	endwrite = true;
661 	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
662 		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
663 		goto out;
664 	}
665 	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
666 	endwrite = false;
667 	if (rv) {
668 		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
669 		goto out;
670 	}
671 
672 	/* Read the response header.  */
673 	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
674 		device_printf(sc->sc_dev, "start read failed, error=%d\n",
675 		    rv);
676 		goto out;
677 	}
678 	endread = true;
679 	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
680 		    &nread, 0)) != 0) {
681 		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
682 		goto out;
683 	}
684 
685 	/* Verify the response header looks sensible.  */
686 	if (nread != sizeof(response.hdr)) {
687 		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
688 		    nread, sizeof(response.hdr));
		rv = EIO;
689 		goto out;
690 	}
691 	tag = be16toh(response.hdr.tag);
692 	pktlen = be32toh(response.hdr.length);
693 	code = be32toh(response.hdr.code);
694 	if (tag != TPM2_ST_NO_SESSIONS ||
695 	    pktlen < offsetof(struct response, bytes) ||
696 	    pktlen > sizeof(response) ||
697 	    code != 0) {
698 		/*
699 		 * If the tpm itself is busy (e.g., it has yet to run a
700 		 * self-test, or it's in a timeout period to defend
701 		 * against brute force attacks), then we can try again
702 		 * later.  Otherwise, give up.
703 		 */
704 		if (code & TPM2_RC_WARN) {
705 			aprint_debug_dev(sc->sc_dev, "%s: tpm busy,"
706 			    " code=TPM_RC_WARN+0x%x\n",
707 			    __func__, code & ~TPM2_RC_WARN);
708 			rv = 0;
709 		} else {
710 			device_printf(sc->sc_dev, "bad tpm response:"
711 			    " tag=%u len=%u code=0x%x\n", tag, pktlen, code);
712 			hexdump(aprint_debug, "tpm response header",
713 			    (const void *)&response.hdr,
714 			    sizeof(response.hdr));
715 			rv = EIO;
716 		}
717 		goto out;
718 	}
719 
720 	/* Read the response payload.  */
721 	if ((rv = (*sc->sc_intf->read)(sc,
722 		    (char *)&response + nread, pktlen - nread,
723 		    NULL, TPM_PARAM_SIZE)) != 0) {
724 		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
725 		goto out;
726 	}
727 	endread = false;
728 	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
729 		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
730 		goto out;
731 	}
732 
733 	/* Verify the number of bytes read looks sensible.  */
734 	nbytes = be16toh(response.randomBytesSize);
735 	if (nbytes > pktlen - offsetof(struct response, bytes)) {
736 		device_printf(sc->sc_dev, "overlong GetRandom length:"
737 		    " %u, max %zu\n",
738 		    nbytes, pktlen - offsetof(struct response, bytes));
739 		nbytes = pktlen - offsetof(struct response, bytes);
740 	}
741 
742 	/*
743 	 * Enter the data into the entropy pool.  Conservatively (or,
744 	 * perhaps, cargocultily) estimate half a bit of entropy per
745 	 * bit of data.
746 	 */
747 	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
748 	entropybits = (NBBY/2)*nbytes;
749 	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);
750 
751 out:	/* End the read or write if still ongoing.  */
752 	if (endread)
753 		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
754 	if (endwrite)
755 		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);
756 
757 	*entropybitsp = entropybits;
758 	return rv;
759 }
760 
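/*
 * Workqueue handler: tpm_rng_get() has recorded in sc_rndpending how
 * many bytes of entropy are wanted.  Take that count (resetting it to
 * zero) and issue GetRandom commands until it is satisfied or the TPM
 * fails, in which case the random source is disabled for good.
 */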
761 static void
762 tpm_rng_work(struct work *wk, void *cookie)
763 {
764 	struct tpm_softc *sc = cookie;
765 	unsigned nbytes, entropybits;
766 	int rv;
767 
768 	/* Acknowledge the request.  */
769 	nbytes = atomic_swap_uint(&sc->sc_rndpending, 0);
770 
771 	/* Lock the tpm while we do I/O transactions with it.  */
772 	mutex_enter(&sc->sc_lock);
773 
774 	/*
775 	 * Issue as many commands as needed to fulfill the request, but
776 	 * stop if anything fails.
777 	 */
778 	for (; nbytes; nbytes -= MIN(nbytes, MAX(1, entropybits/NBBY))) {
779 		switch (sc->sc_ver) {
780 		case TPM_1_2:
781 			rv = tpm12_rng(sc, &entropybits);
782 			break;
783 		case TPM_2_0:
784 			rv = tpm20_rng(sc, &entropybits);
785 			break;
786 		default:
787 			panic("bad tpm version: %d", sc->sc_ver);
788 		}
789 		if (rv)
790 			break;
791 	}
792 
793 	/*
794 	 * If the tpm is busted, no sense in trying again -- most
795 	 * likely, it is deactivated, and by the spec it cannot be
796 	 * reactivated until after a reboot.
797 	 */
798 	if (rv) {
799 		device_printf(sc->sc_dev, "deactivating entropy source\n");
800 		atomic_store_relaxed(&sc->sc_rnddisabled, true);
801 		/* XXX worker thread can't workqueue_destroy its own queue */
802 	}
803 
804 	/* Relinquish the tpm.  */
805 	mutex_exit(&sc->sc_lock);
806 }
807 
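/*
 * rndsource callback: just record the number of bytes wanted and kick
 * the workqueue, which does the actual (sleeping) I/O.  The atomic swap
 * ensures at most one request is queued at a time.
 */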
808 static void
809 tpm_rng_get(size_t nbytes, void *cookie)
810 {
811 	struct tpm_softc *sc = cookie;
812 
813 	if (atomic_load_relaxed(&sc->sc_rnddisabled))
814 		return;		/* tough */
815 	if (atomic_swap_uint(&sc->sc_rndpending, MIN(nbytes, UINT_MAX/NBBY))
816 	    == 0)
817 		workqueue_enqueue(sc->sc_rndwq, &sc->sc_rndwk, NULL);
818 }
819 
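/*
 * One-time setup for a TIS 1.2 interface: read and report the device ID
 * and revision, claim locality 0, put the TPM into the ready state, and
 * attach the workqueue-backed random source.
 */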
820 static int
821 tpm_tis12_init(struct tpm_softc *sc)
822 {
823 	int rv;
824 
825 	sc->sc_caps = bus_space_read_4(sc->sc_bt, sc->sc_bh,
826 	    TPM_INTF_CAPABILITY);
827 	sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID);
828 	sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV);
829 
830 	aprint_normal_dev(sc->sc_dev, "device 0x%08x rev 0x%x\n",
831 	    sc->sc_devid, sc->sc_rev);
832 
833 	if ((rv = tpm12_request_locality(sc, 0)) != 0)
834 		return rv;
835 
836 	/* Abort whatever it thought it was doing. */
837 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
838 
839 	/* XXX Run this at higher priority?  */
840 	if ((rv = workqueue_create(&sc->sc_rndwq, device_xname(sc->sc_dev),
841 		    tpm_rng_work, sc, PRI_NONE, IPL_VM, WQ_MPSAFE)) != 0)
842 		return rv;
843 	rndsource_setcb(&sc->sc_rnd, tpm_rng_get, sc);
844 	rnd_attach_source(&sc->sc_rnd, device_xname(sc->sc_dev),
845 	    RND_TYPE_RNG,
846 	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|RND_FLAG_HASCB);
847 
848 	return 0;
849 }
850 
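/*
 * Begin a transfer.  For reads, wait until the TPM reports a response
 * is available.  For writes, claim locality 0 and make sure the TPM is
 * in the ready state, aborting any previous command if necessary.
 */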
851 static int
852 tpm_tis12_start(struct tpm_softc *sc, int rw)
853 {
854 	int rv;
855 
856 	if (rw == UIO_READ) {
857 		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
858 		    TPM_READ_TMO, sc->sc_intf->read);
859 		return rv;
860 	}
861 
862 	/* Request the 0th locality. */
863 	if ((rv = tpm12_request_locality(sc, 0)) != 0)
864 		return rv;
865 
866 	sc->sc_status = tpm_status(sc);
867 	if (sc->sc_status & TPM_STS_CMD_READY)
868 		return 0;
869 
870 	/* Abort previous and restart. */
871 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
872 	rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO,
	    sc->sc_intf->write);
873 	if (rv)
874 		return rv;
875 
876 	return 0;
877 }
878 
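/*
 * Read up to `len' bytes from the data FIFO, honouring the burst count.
 * Unless TPM_PARAM_SIZE is given, stop once the fixed part of a response
 * header (tag and length) has arrived, so the caller can parse it and
 * size the rest of the transfer.
 */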
879 static int
880 tpm_tis12_read(struct tpm_softc *sc, void *buf, size_t len, size_t *count,
881     int flags)
882 {
883 	uint8_t *p = buf;
884 	size_t cnt;
885 	int rv, n;
886 
887 	cnt = 0;
888 	while (len > 0) {
889 		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
890 		    TPM_READ_TMO, sc->sc_intf->read);
891 		if (rv)
892 			return rv;
893 
894 		n = MIN(len, tpm_getburst(sc));
895 		while (n > 0) {
896 			*p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_DATA);
897 			cnt++;
898 			len--;
899 			n--;
900 		}
901 
902 		if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6)
903 			break;
904 	}
905 
906 	if (count)
907 		*count = cnt;
908 
909 	return 0;
910 }
911 
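/*
 * Write a command to the data FIFO in burst-sized chunks, holding back
 * the final byte: the TPM must still expect data before the last byte
 * is written, and must stop expecting data right after it.
 */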
912 static int
913 tpm_tis12_write(struct tpm_softc *sc, const void *buf, size_t len)
914 {
915 	const uint8_t *p = buf;
916 	size_t cnt;
917 	int rv, r;
918 
919 	if (len == 0)
920 		return 0;
921 	if ((rv = tpm12_request_locality(sc, 0)) != 0)
922 		return rv;
923 
924 	cnt = 0;
925 	while (cnt < len - 1) {
926 		for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) {
927 			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
928 			cnt++;
929 		}
930 		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
931 			return rv;
932 		}
933 		sc->sc_status = tpm_status(sc);
934 		if (!(sc->sc_status & TPM_STS_DATA_EXPECT)) {
935 			return EIO;
936 		}
937 	}
938 
939 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
940 	cnt++;
941 
942 	if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
943 		return rv;
944 	}
945 	if ((sc->sc_status & TPM_STS_DATA_EXPECT) != 0) {
946 		return EIO;
947 	}
948 
949 	return 0;
950 }
951 
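/*
 * Finish a transfer.  For reads, check that no data is left over, put
 * the TPM back into the ready state, and release locality 0.  For
 * writes, check that the TPM is not still expecting data, then either
 * start command execution (TPM_STS_GO) or abort on error.
 */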
952 static int
953 tpm_tis12_end(struct tpm_softc *sc, int rw, int err)
954 {
955 	int rv = 0;
956 
957 	if (rw == UIO_READ) {
958 		rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO,
		    sc->sc_intf->read);
959 		if (rv)
960 			goto out;
961 
962 		/* Still more data? */
963 		sc->sc_status = tpm_status(sc);
964 		if (!err && (sc->sc_status & TPM_STS_DATA_AVAIL)) {
965 			rv = EIO;
966 		}
967 
968 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
969 		    TPM_STS_CMD_READY);
970 
971 		/* Release the 0th locality. */
972 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
973 		    TPM_ACCESS_ACTIVE_LOCALITY);
974 	} else {
975 		/* Hungry for more? */
976 		sc->sc_status = tpm_status(sc);
977 		if (!err && (sc->sc_status & TPM_STS_DATA_EXPECT)) {
978 			rv = EIO;
979 		}
980 
981 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
982 		    err ? TPM_STS_CMD_READY : TPM_STS_GO);
983 	}
984 
985 out:	return err ? err : rv;
986 }
987 
988 const struct tpm_intf tpm_intf_tis12 = {
989 	.version = TIS_1_2,
990 	.probe = tpm_tis12_probe,
991 	.init = tpm_tis12_init,
992 	.start = tpm_tis12_start,
993 	.read = tpm_tis12_read,
994 	.write = tpm_tis12_write,
995 	.end = tpm_tis12_end
996 };
997 
998 /* -------------------------------------------------------------------------- */
999 
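/*
 * Userland interface, tpm(4).  Only one open at a time (sc_busy); a
 * write() submits one raw command and a subsequent read() returns the
 * corresponding raw response.
 */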
1000 static dev_type_open(tpmopen);
1001 static dev_type_close(tpmclose);
1002 static dev_type_read(tpmread);
1003 static dev_type_write(tpmwrite);
1004 static dev_type_ioctl(tpmioctl);
1005 
1006 const struct cdevsw tpm_cdevsw = {
1007 	.d_open = tpmopen,
1008 	.d_close = tpmclose,
1009 	.d_read = tpmread,
1010 	.d_write = tpmwrite,
1011 	.d_ioctl = tpmioctl,
1012 	.d_stop = nostop,
1013 	.d_tty = notty,
1014 	.d_poll = nopoll,
1015 	.d_mmap = nommap,
1016 	.d_kqfilter = nokqfilter,
1017 	.d_discard = nodiscard,
1018 	.d_flag = D_OTHER | D_MPSAFE,
1019 };
1020 
1021 static int
1022 tpmopen(dev_t dev, int flag, int mode, struct lwp *l)
1023 {
1024 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
1025 	int ret = 0;
1026 
1027 	if (sc == NULL)
1028 		return ENXIO;
1029 
1030 	mutex_enter(&sc->sc_lock);
1031 	if (sc->sc_busy) {
1032 		ret = EBUSY;
1033 	} else {
1034 		sc->sc_busy = true;
1035 	}
1036 	mutex_exit(&sc->sc_lock);
1037 
1038 	return ret;
1039 }
1040 
1041 static int
1042 tpmclose(dev_t dev, int flag, int mode, struct lwp *l)
1043 {
1044 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
1045 	int ret = 0;
1046 
1047 	if (sc == NULL)
1048 		return ENXIO;
1049 
1050 	mutex_enter(&sc->sc_lock);
1051 	if (!sc->sc_busy) {
1052 		ret = EINVAL;
1053 	} else {
1054 		sc->sc_busy = false;
1055 	}
1056 	mutex_exit(&sc->sc_lock);
1057 
1058 	return ret;
1059 }
1060 
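/*
 * Read one complete response: fetch the header first to learn the total
 * length, then the payload, and only copy the result out to userland
 * once the transaction with the TPM is over.
 */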
1061 static int
1062 tpmread(dev_t dev, struct uio *uio, int flags)
1063 {
1064 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
1065 	struct tpm_header hdr;
1066 	uint8_t buf[TPM_BUFSIZ];
1067 	size_t cnt, len = 0/*XXXGCC*/;
1068 	bool end = false;
1069 	int rv;
1070 
1071 	if (sc == NULL)
1072 		return ENXIO;
1073 
1074 	mutex_enter(&sc->sc_lock);
1075 
1076 	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)))
1077 		goto out;
1078 	end = true;
1079 
1080 	/* Get the header. */
1081 	if ((rv = (*sc->sc_intf->read)(sc, &hdr, sizeof(hdr), &cnt, 0))) {
1082 		goto out;
1083 	}
1084 	if (cnt != sizeof(hdr)) {
1085 		rv = EIO;
1086 		goto out;
1087 	}
1088 	len = be32toh(hdr.length);
1089 	if (len > MIN(sizeof(buf), uio->uio_resid) || len < sizeof(hdr)) {
1090 		rv = EIO;
1091 		goto out;
1092 	}
1093 
1094 	/* Get the payload. */
1095 	len -= sizeof(hdr);
1096 	if ((rv = (*sc->sc_intf->read)(sc, buf, len, NULL, TPM_PARAM_SIZE))) {
1097 		goto out;
1098 	}
1099 
1100 out:	if (end)
1101 		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
1102 
1103 	mutex_exit(&sc->sc_lock);
1104 
1105 	/* If anything went wrong, stop here -- nothing to copy out. */
1106 	if (rv)
1107 		return rv;
1108 
1109 	/* Copy out the header. */
1110 	if ((rv = uiomove(&hdr, sizeof(hdr), uio))) {
1111 		return rv;
1112 	}
1113 
1114 	/* Copy out the payload.  */
1115 	if ((rv = uiomove(buf, len, uio))) {
1116 		return rv;
1117 	}
1118 
1119 	/* Success! */
1120 	return 0;
1121 }
1122 
1123 static int
1124 tpmwrite(dev_t dev, struct uio *uio, int flags)
1125 {
1126 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
1127 	uint8_t buf[TPM_BUFSIZ];
1128 	bool end = false;
1129 	int n, rv;
1130 
1131 	if (sc == NULL)
1132 		return ENXIO;
1133 
1134 	n = MIN(sizeof(buf), uio->uio_resid);
1135 	if ((rv = uiomove(buf, n, uio))) {
1136 		return rv;
1137 	}
1138 
1139 	mutex_enter(&sc->sc_lock);
1140 
1141 	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE))) {
1142 		goto out;
1143 	}
1144 	end = true;
1145 
1146 	if ((rv = (*sc->sc_intf->write)(sc, buf, n))) {
1147 		goto out;
1148 	}
1149 
1150 out:	if (end)
1151 		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);
1152 
1153 	mutex_exit(&sc->sc_lock);
1154 	return rv;
1155 }
1156 
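/*
 * A minimal sketch of how userland might use TPM_IOC_GETINFO, assuming
 * the usual /dev/tpm device node:
 *
 *	struct tpm_ioc_getinfo info;
 *	int fd = open("/dev/tpm", O_RDWR);
 *
 *	if (fd != -1 && ioctl(fd, TPM_IOC_GETINFO, &info) == 0)
 *		printf("device 0x%08x caps 0x%08x\n",
 *		    info.device_id, info.device_caps);
 */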
1157 static int
1158 tpmioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1159 {
1160 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
1161 	struct tpm_ioc_getinfo *info;
1162 
1163 	if (sc == NULL)
1164 		return ENXIO;
1165 
1166 	switch (cmd) {
1167 	case TPM_IOC_GETINFO:
1168 		info = addr;
1169 		info->api_version = TPM_API_VERSION;
1170 		info->tpm_version = sc->sc_ver;
1171 		info->itf_version = sc->sc_intf->version;
1172 		info->device_id = sc->sc_devid;
1173 		info->device_rev = sc->sc_rev;
1174 		info->device_caps = sc->sc_caps;
1175 		return 0;
1176 	default:
1177 		break;
1178 	}
1179 
1180 	return ENOTTY;
1181 }
1182