/*	$NetBSD: tpm.c,v 1.25 2022/01/29 12:27:30 riastradh Exp $	*/

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008, 2009 Michael Shalayeff
 * Copyright (c) 2009, 2010 Hans-Joerg Hoexer
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tpm.c,v 1.25 2022/01/29 12:27:30 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pmf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <dev/ic/tpmreg.h>
#include <dev/ic/tpmvar.h>

#include "ioconf.h"

CTASSERT(sizeof(struct tpm_header) == 10);

#define TPM_BUFSIZ	1024

#define TPM_PARAM_SIZE	0x0001	/* that's a flag */

/* Timeouts. */
#define TPM_ACCESS_TMO	2000	/* 2sec */
#define TPM_READY_TMO	2000	/* 2sec */
#define TPM_READ_TMO	2000	/* 2sec */
#define TPM_BURST_TMO	2000	/* 2sec */

#define TPM_CAPS_REQUIRED \
	(TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT| \
	 TPM_INTF_INT_LEVEL_LOW)

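/*
 * Convert a timeout in milliseconds into a tick count suitable for
 * tsleep(9).
 */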
static inline int
tpm_tmotohz(int tmo)
{
	struct timeval tv;

	tv.tv_sec = tmo / 1000;
	tv.tv_usec = 1000 * (tmo % 1000);

	return tvtohz(&tv);
}

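/*
 * Poll the burst count field of the status register (bits 23:8) until it
 * becomes nonzero, i.e. until the TPM can transfer more data.  Returns the
 * burst count, or 0 on timeout or if the sleep was interrupted.
 */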
static int
tpm_getburst(struct tpm_softc *sc)
{
	int burst, to, rv;

	to = tpm_tmotohz(TPM_BURST_TMO);

	while (to--) {
		/*
		 * Burst count is in bits 23:8, so read the two higher bytes.
		 */
		burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1);
		burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2)
		    << 8;

		if (burst)
			return burst;

		rv = tsleep(sc, PCATCH, "tpm_getburst", 1);
		if (rv && rv != EWOULDBLOCK) {
			return 0;
		}
	}

	return 0;
}

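/* Read the status register, masked down to the status bits we care about. */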
static inline uint8_t
tpm_status(struct tpm_softc *sc)
{
	return bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) &
	    TPM_STS_STATUS_BITS;
}

/* -------------------------------------------------------------------------- */

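/*
 * Send TPM_ORD_SaveState to a TPM 1.2 device so that it saves its volatile
 * state before the system suspends; the firmware is expected to restore
 * that state on resume.
 */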
static bool
tpm12_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[10] = {
		0x00, 0xC1,		/* TPM_TAG_RQU_COMMAND */
		0x00, 0x00, 0x00, 10,	/* Length in bytes */
		0x00, 0x00, 0x00, 0x98	/* TPM_ORD_SaveState */
	};
	struct tpm_header response;
	size_t nread;
	bool endwrite = false, endread = false;
	int error;

	/*
	 * Write the command.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
	if (error) {
		device_printf(sc->sc_dev, "start write failed: %d\n", error);
		goto out;
	}

	endwrite = true;

	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
	if (error) {
		device_printf(sc->sc_dev,
		    "write TPM_ORD_SaveState failed: %d\n", error);
		goto out;
	}

	endwrite = false;

	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	if (error) {
		device_printf(sc->sc_dev, "end write failed: %d\n", error);
		goto out;
	}

	/*
	 * Read the response -- just the header; we don't expect a
	 * payload.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_READ);
	if (error) {
		device_printf(sc->sc_dev, "start read failed: %d\n", error);
		goto out;
	}

	endread = true;

	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
	    0);
	if (error) {
		device_printf(sc->sc_dev, "read failed: %d\n", error);
		goto out;
	}
	if (nread != sizeof(response)) {
		device_printf(sc->sc_dev, "short header read: %zu\n", nread);
		goto out;
	}

	endread = false;

	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
	if (error) {
		device_printf(sc->sc_dev, "end read failed: %d\n", error);
		goto out;
	}

	/*
	 * Verify the response looks reasonable.
	 */
	if (be16toh(response.tag) != TPM_TAG_RSP_COMMAND ||
	    be32toh(response.length) != sizeof(response) ||
	    be32toh(response.code) != 0) {
		device_printf(sc->sc_dev,
		    "TPM_ORD_SaveState failed: tag=0x%x length=0x%x code=0x%x\n",
		    be16toh(response.tag),
		    be32toh(response.length),
		    be32toh(response.code));
		error = EIO;
		goto out;
	}

	/* Success!  */
	error = 0;

out:	if (endwrite)
		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
	if (endread)
		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
	if (error)
		return false;
	return true;
}

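/*
 * Send TPM_CC_Shutdown(TPM_SU_STATE) to a TPM 2.0 device, the 2.0
 * equivalent of TPM_ORD_SaveState above.
 */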
static bool
tpm20_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[12] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS */
		0x00, 0x00, 0x00, 12,	/* Length in bytes */
		0x00, 0x00, 0x01, 0x45,	/* TPM_CC_Shutdown */
		0x00, 0x01		/* TPM_SU_STATE */
	};
	struct tpm_header response;
	size_t nread;
	bool endwrite = false, endread = false;
	int error;

	/*
	 * Write the command.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
	if (error) {
		device_printf(sc->sc_dev, "start write failed: %d\n", error);
		goto out;
	}

	endwrite = true;

	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
	if (error) {
		device_printf(sc->sc_dev,
		    "write TPM_CC_Shutdown failed: %d\n", error);
		goto out;
	}

	endwrite = false;

	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	if (error) {
		device_printf(sc->sc_dev, "end write failed: %d\n", error);
		goto out;
	}

	/*
	 * Read the response -- just the header; we don't expect a
	 * payload.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_READ);
	if (error) {
		device_printf(sc->sc_dev, "start read failed: %d\n", error);
		goto out;
	}

	endread = true;

	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
	    0);
	if (error) {
		device_printf(sc->sc_dev, "read failed: %d\n", error);
		goto out;
	}
	if (nread != sizeof(response)) {
		device_printf(sc->sc_dev, "short header read: %zu\n", nread);
		goto out;
	}

	endread = false;

	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
	if (error) {
		device_printf(sc->sc_dev, "end read failed: %d\n", error);
		goto out;
	}

	/*
	 * Verify the response looks reasonable.
	 */
	if (be16toh(response.tag) != TPM2_ST_NO_SESSIONS ||
	    be32toh(response.length) != sizeof(response) ||
	    be32toh(response.code) != TPM2_RC_SUCCESS) {
		device_printf(sc->sc_dev,
		    "TPM_CC_Shutdown failed: tag=0x%x length=0x%x code=0x%x\n",
		    be16toh(response.tag),
		    be32toh(response.length),
		    be32toh(response.code));
		error = EIO;
		goto out;
	}

	/* Success!  */
	error = 0;

out:	if (endwrite)
		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
	if (endread)
		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
	if (error)
		return false;
	return true;
}

bool
tpm_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct tpm_softc *sc = device_private(dev);

	switch (sc->sc_ver) {
	case TPM_1_2:
		return tpm12_suspend(sc);
	case TPM_2_0:
		return tpm20_suspend(sc);
	default:
		panic("%s: impossible", __func__);
	}
}

bool
tpm_resume(device_t dev, const pmf_qual_t *qual)
{
	/*
	 * Don't do anything, the BIOS is supposed to restore the previously
	 * saved state.
	 */
	return true;
}

/* -------------------------------------------------------------------------- */

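/*
 * Poll the status register until all bits in 'mask' are set, sleeping one
 * tick at a time for at most 'to' ticks.  Returns 0 on success or timeout
 * (the caller checks sc_status), or an error if the sleep was interrupted.
 */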
static int
tpm_poll(struct tpm_softc *sc, uint8_t mask, int to, wchan_t chan)
{
	int rv;

	while (((sc->sc_status = tpm_status(sc)) & mask) != mask && to--) {
		rv = tsleep(chan, PCATCH, "tpm_poll", 1);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	return 0;
}

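/*
 * Wait for the status bits in 'bits' to become set.  TPM_STS_VALID is
 * waited for first; if the remaining bits still do not show up, a
 * responseRetry is issued and the wait is retried a few times before
 * giving up with EIO.
 */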
static int
tpm_waitfor(struct tpm_softc *sc, uint8_t bits, int tmo, wchan_t chan)
{
	int retry, to, rv;
	uint8_t todo;

	to = tpm_tmotohz(tmo);
	retry = 3;

restart:
	todo = bits;

	/*
	 * TPM_STS_VALID has priority over the others.
	 */
	if (todo & TPM_STS_VALID) {
		if ((rv = tpm_poll(sc, TPM_STS_VALID, to+1, chan)) != 0)
			return rv;
		todo &= ~TPM_STS_VALID;
	}

	if ((rv = tpm_poll(sc, todo, to, chan)) != 0)
		return rv;

	if ((todo & sc->sc_status) != todo) {
		if ((retry-- > 0) && (bits & TPM_STS_VALID)) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
			    TPM_STS_RESP_RETRY);
			goto restart;
		}
		return EIO;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

/*
 * TPM using the TIS 1.2 interface.
 */

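/*
 * Request locality 'l' (only locality 0 is supported) and wait for the TPM
 * to mark it active.  Returns 0 on success, EBUSY on timeout.
 */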
static int
tpm12_request_locality(struct tpm_softc *sc, int l)
{
	uint32_t r;
	int to, rv;

	if (l != 0)
		return EINVAL;

	if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) ==
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
		return 0;

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
	    TPM_ACCESS_REQUEST_USE);

	to = tpm_tmotohz(TPM_ACCESS_TMO);

	while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) {
		rv = tsleep(sc->sc_intf->init, PCATCH, "tpm_locality", 1);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return EBUSY;
	}

	return 0;
}

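/*
 * Probe for a TIS 1.2 interface: check that the interface capability
 * register advertises the features we require, claim locality 0, and make
 * sure the device ID register reads back something sane.
 */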
static int
tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh)
{
	uint32_t cap;
	uint8_t reg;
	int tmo;

	cap = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITY);
	if (cap == 0xffffffff)
		return EINVAL;
	if ((cap & TPM_CAPS_REQUIRED) != TPM_CAPS_REQUIRED)
		return ENOTSUP;

	/* Request locality 0. */
	bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE);

	/* Wait for it to become active. */
	tmo = TPM_ACCESS_TMO; /* Milliseconds. */
	while ((reg = bus_space_read_1(bt, bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && tmo--) {
		DELAY(1000); /* 1 millisecond. */
	}
	if ((reg & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return ETIMEDOUT;
	}

	if (bus_space_read_4(bt, bh, TPM_ID) == 0xffffffff)
		return EINVAL;

	return 0;
}

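/*
 * Ask a TPM 1.2 device for up to 64 random bytes with TPM_ORD_GetRandom
 * and feed whatever comes back into the entropy pool.  A busy TPM is not
 * treated as a hard failure, so the caller can simply try again.
 */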
static int
tpm12_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * TPM Specification Version 1.2, Main Part 3: Commands,
	 * Sec. 13.6 TPM_GetRandom
	 */
	struct {
		struct tpm_header hdr;
		uint32_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint32_t randomBytesSize;
		uint8_t	bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command.  */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM_TAG_RQU_COMMAND);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM_ORD_GetRandom);
	command.bytesRequested = htobe32(sizeof(response.bytes));

	/* Write the command.  */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header.  */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
		    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible.  */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
		    nread, sizeof(response.hdr));
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM_TAG_RSP_COMMAND ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later.  Otherwise, give up.
		 */
		if (code & TPM_NON_FATAL) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy, code=%u\n",
			    __func__, code & ~TPM_NON_FATAL);
			rv = 0;
		} else if (code == TPM_DEACTIVATED) {
			device_printf(sc->sc_dev, "tpm is deactivated\n");
			rv = ENXIO;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=%u\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload.  */
	if ((rv = (*sc->sc_intf->read)(sc,
		    (char *)&response + nread, pktlen - nread,
		    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible.  */
	nbytes = be32toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool.  Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing.  */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

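/*
 * Same as tpm12_rng, but using TPM2_CC_GetRandom; TPM 2.0 returns the byte
 * count in a 16-bit field and reports transient conditions via TPM_RC_WARN.
 */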
static int
tpm20_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * Trusted Platform Module Library, Family "2.0", Level 00
	 * Revision 01.38, Part 3: Commands, Sec. 16.1 `TPM2_GetRandom'
	 *
	 * https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-3-Commands-01.38.pdf#page=133
	 */
	struct {
		struct tpm_header hdr;
		uint16_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint16_t randomBytesSize;
		uint8_t bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command.  */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM2_ST_NO_SESSIONS);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM2_CC_GetRandom);
	command.bytesRequested = htobe16(sizeof(response.bytes));

	/* Write the command.  */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header.  */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
		    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible.  */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
		    nread, sizeof(response.hdr));
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM2_ST_NO_SESSIONS ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later.  Otherwise, give up.
		 */
		if (code & TPM2_RC_WARN) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy,"
			    " code=TPM_RC_WARN+0x%x\n",
			    __func__, code & ~TPM2_RC_WARN);
			rv = 0;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=0x%x\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload.  */
	if ((rv = (*sc->sc_intf->read)(sc,
		    (char *)&response + nread, pktlen - nread,
		    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible.  */
	nbytes = be16toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool.  Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing.  */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

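/*
 * Workqueue callback: service a pending entropy request by issuing
 * GetRandom commands until the requested number of bytes has been
 * gathered or the TPM reports a failure.
 */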
static void
tpm_rng_work(struct work *wk, void *cookie)
{
	struct tpm_softc *sc = cookie;
	unsigned nbytes, entropybits;
	int rv;

	/* Acknowledge the request.  */
	nbytes = atomic_swap_uint(&sc->sc_rndpending, 0);

	/* Lock the tpm while we do I/O transactions with it.  */
	mutex_enter(&sc->sc_lock);

	/*
	 * Issue as many commands as needed to fulfill the request, but
	 * stop if anything fails.
	 */
	for (; nbytes; nbytes -= MIN(nbytes, MAX(1, entropybits/NBBY))) {
		switch (sc->sc_ver) {
		case TPM_1_2:
			rv = tpm12_rng(sc, &entropybits);
			break;
		case TPM_2_0:
			rv = tpm20_rng(sc, &entropybits);
			break;
		default:
			panic("bad tpm version: %d", sc->sc_ver);
		}
		if (rv)
			break;
	}

	/*
	 * If the tpm is busted, no sense in trying again -- most
	 * likely, it is deactivated, and by the spec it cannot be
	 * reactivated until after a reboot.
	 */
	if (rv) {
		device_printf(sc->sc_dev, "deactivating entropy source\n");
		atomic_store_relaxed(&sc->sc_rnddisabled, true);
		/* XXX worker thread can't workqueue_destroy its own queue */
	}

	/* Relinquish the tpm.  */
	mutex_exit(&sc->sc_lock);
}

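/*
 * Entropy source callback: note how many bytes the entropy pool wants and
 * schedule the workqueue to fetch them, unless the source has been
 * disabled after an earlier failure.
 */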
static void
tpm_rng_get(size_t nbytes, void *cookie)
{
	struct tpm_softc *sc = cookie;

	if (atomic_load_relaxed(&sc->sc_rnddisabled))
		return;		/* tough */
	if (atomic_swap_uint(&sc->sc_rndpending, MIN(nbytes, UINT_MAX/NBBY))
	    == 0)
		workqueue_enqueue(sc->sc_rndwq, &sc->sc_rndwk, NULL);
}

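/*
 * Set up the TIS 1.2 interface: record the capability, device ID and
 * revision registers, claim locality 0, cancel any command in progress,
 * and attach the random number source.
 */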
static int
tpm_tis12_init(struct tpm_softc *sc)
{
	int rv;

	sc->sc_caps = bus_space_read_4(sc->sc_bt, sc->sc_bh,
	    TPM_INTF_CAPABILITY);
	sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID);
	sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV);

	aprint_normal_dev(sc->sc_dev, "device 0x%08x rev 0x%x\n",
	    sc->sc_devid, sc->sc_rev);

	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	/* Abort whatever it thought it was doing. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);

	/* XXX Run this at higher priority?  */
	if ((rv = workqueue_create(&sc->sc_rndwq, device_xname(sc->sc_dev),
		    tpm_rng_work, sc, PRI_NONE, IPL_VM, WQ_MPSAFE)) != 0)
		return rv;
	rndsource_setcb(&sc->sc_rnd, tpm_rng_get, sc);
	rnd_attach_source(&sc->sc_rnd, device_xname(sc->sc_dev),
	    RND_TYPE_RNG,
	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|RND_FLAG_HASCB);

	return 0;
}

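/*
 * Begin a transfer.  For a read, wait until the TPM signals that response
 * data is available; for a write, claim locality 0 and put the TPM into
 * the ready state.
 */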
static int
tpm_tis12_start(struct tpm_softc *sc, int rw)
{
	int rv;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		return rv;
	}

	/* Request the 0th locality. */
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	sc->sc_status = tpm_status(sc);
	if (sc->sc_status & TPM_STS_CMD_READY)
		return 0;

	/* Abort previous and restart. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
	rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO,
	    sc->sc_intf->write);
	if (rv)
		return rv;

	return 0;
}

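/*
 * Read up to 'len' bytes from the data FIFO, one burst at a time.  Unless
 * TPM_PARAM_SIZE is set the caller is only after the header, so stop once
 * the tag and length fields (6 bytes) have been read.
 */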
static int
tpm_tis12_read(struct tpm_softc *sc, void *buf, size_t len, size_t *count,
    int flags)
{
	uint8_t *p = buf;
	size_t cnt;
	int rv, n;

	cnt = 0;
	while (len > 0) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		if (rv)
			return rv;

		n = MIN(len, tpm_getburst(sc));
		while (n > 0) {
			*p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_DATA);
			cnt++;
			len--;
			n--;
		}

		if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6)
			break;
	}

	if (count)
		*count = cnt;

	return 0;
}

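/*
 * Write 'len' bytes to the data FIFO, respecting the burst count.  The
 * TPM must keep asserting Expect until the last byte has been written,
 * and must deassert it afterwards; anything else is an error.
 */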
static int
tpm_tis12_write(struct tpm_softc *sc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	size_t cnt;
	int rv, r;

	if (len == 0)
		return 0;
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	cnt = 0;
	while (cnt < len - 1) {
		for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
			cnt++;
		}
		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
			return rv;
		}
		sc->sc_status = tpm_status(sc);
		if (!(sc->sc_status & TPM_STS_DATA_EXPECT)) {
			return EIO;
		}
	}

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
	cnt++;

	if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
		return rv;
	}
	if ((sc->sc_status & TPM_STS_DATA_EXPECT) != 0) {
		return EIO;
	}

	return 0;
}

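/*
 * Finish a transfer.  After a read, make sure no data is left over, put
 * the TPM back into the ready state and release locality 0; after a write,
 * either start command execution (tpmGo) or cancel it if an error occurred.
 */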
static int
tpm_tis12_end(struct tpm_softc *sc, int rw, int err)
{
	int rv = 0;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO,
		    sc->sc_intf->read);
		if (rv)
			goto out;

		/* Still more data? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_AVAIL)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    TPM_STS_CMD_READY);

		/* Release the 0th locality. */
		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
		    TPM_ACCESS_ACTIVE_LOCALITY);
	} else {
		/* Hungry for more? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_EXPECT)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    err ? TPM_STS_CMD_READY : TPM_STS_GO);
	}

out:	return err ? err : rv;
}

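/*
 * Method table for the TIS 1.2 interface, referenced through sc_intf by
 * the rest of the driver.
 */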
const struct tpm_intf tpm_intf_tis12 = {
	.version = TIS_1_2,
	.probe = tpm_tis12_probe,
	.init = tpm_tis12_init,
	.start = tpm_tis12_start,
	.read = tpm_tis12_read,
	.write = tpm_tis12_write,
	.end = tpm_tis12_end
};

/* -------------------------------------------------------------------------- */

static dev_type_open(tpmopen);
static dev_type_close(tpmclose);
static dev_type_read(tpmread);
static dev_type_write(tpmwrite);
static dev_type_ioctl(tpmioctl);

const struct cdevsw tpm_cdevsw = {
	.d_open = tpmopen,
	.d_close = tpmclose,
	.d_read = tpmread,
	.d_write = tpmwrite,
	.d_ioctl = tpmioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};

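/* The tpm device node is exclusive-open: only one open at a time is allowed. */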
static int
tpmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_busy) {
		ret = EBUSY;
	} else {
		sc->sc_busy = true;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

static int
tpmclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (!sc->sc_busy) {
		ret = EINVAL;
	} else {
		sc->sc_busy = false;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

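/*
 * Read one complete response from the TPM: fetch the header to learn the
 * total length, then the payload, and copy both out to the caller.
 */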
static int
tpmread(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_header hdr;
	uint8_t buf[TPM_BUFSIZ];
	size_t cnt, len = 0/*XXXGCC*/;
	bool end = false;
	int rv;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);

	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)))
		goto out;
	end = true;

	/* Get the header. */
	if ((rv = (*sc->sc_intf->read)(sc, &hdr, sizeof(hdr), &cnt, 0))) {
		goto out;
	}
	if (cnt != sizeof(hdr)) {
		rv = EIO;
		goto out;
	}
	len = be32toh(hdr.length);
	if (len > MIN(sizeof(buf), uio->uio_resid) || len < sizeof(hdr)) {
		rv = EIO;
		goto out;
	}

	/* Get the payload. */
	len -= sizeof(hdr);
	if ((rv = (*sc->sc_intf->read)(sc, buf, len, NULL, TPM_PARAM_SIZE))) {
		goto out;
	}

out:	if (end)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);

	mutex_exit(&sc->sc_lock);

	/* If anything went wrong, stop here -- nothing to copy out. */
	if (rv)
		return rv;

	/* Copy out the header. */
	if ((rv = uiomove(&hdr, sizeof(hdr), uio))) {
		return rv;
	}

	/* Copy out the payload.  */
	if ((rv = uiomove(buf, len, uio))) {
		return rv;
	}

	/* Success! */
	return 0;
}

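/*
 * Copy a command in from the caller and hand it to the TPM in a single
 * write transaction.
 */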
static int
tpmwrite(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	uint8_t buf[TPM_BUFSIZ];
	bool end = false;
	int n, rv;

	if (sc == NULL)
		return ENXIO;

	n = MIN(sizeof(buf), uio->uio_resid);
	if ((rv = uiomove(buf, n, uio))) {
		return rv;
	}

	mutex_enter(&sc->sc_lock);

	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE))) {
		goto out;
	}
	end = true;

	if ((rv = (*sc->sc_intf->write)(sc, buf, n))) {
		goto out;
	}

out:	if (end)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	mutex_exit(&sc->sc_lock);
	return rv;
}

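/* TPM_IOC_GETINFO is the only ioctl supported so far. */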
static int
tpmioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_ioc_getinfo *info;

	if (sc == NULL)
		return ENXIO;

	switch (cmd) {
	case TPM_IOC_GETINFO:
		info = addr;
		info->api_version = TPM_API_VERSION;
		info->tpm_version = sc->sc_ver;
		info->itf_version = sc->sc_intf->version;
		info->device_id = sc->sc_devid;
		info->device_rev = sc->sc_rev;
		info->device_caps = sc->sc_caps;
		return 0;
	default:
		break;
	}

	return ENOTTY;
}