/*	$NetBSD: tpm.c,v 1.27 2022/09/25 18:43:32 thorpej Exp $	*/

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008, 2009 Michael Shalayeff
 * Copyright (c) 2009, 2010 Hans-Joerg Hoexer
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tpm.c,v 1.27 2022/09/25 18:43:32 thorpej Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/pmf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <dev/ic/tpmreg.h>
#include <dev/ic/tpmvar.h>

#include "ioconf.h"

CTASSERT(sizeof(struct tpm_header) == 10);

#define TPM_BUFSIZ	1024

#define TPM_PARAM_SIZE	0x0001	/* that's a flag */

/* Timeouts. */
#define TPM_ACCESS_TMO	2000	/* 2sec */
#define TPM_READY_TMO	2000	/* 2sec */
#define TPM_READ_TMO	2000	/* 2sec */
#define TPM_BURST_TMO	2000	/* 2sec */

#define TPM_CAPS_REQUIRED \
	(TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT| \
	 TPM_INTF_INT_LEVEL_LOW)

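/*
 * tpm_tmotohz:
 *
 *	Convert a timeout in milliseconds to a tick count suitable for
 *	tsleep(9).
 */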
static inline int
tpm_tmotohz(int tmo)
{
	struct timeval tv;

	tv.tv_sec = tmo / 1000;
	tv.tv_usec = 1000 * (tmo % 1000);

	return tvtohz(&tv);
}

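/*
 * tpm_getburst:
 *
 *	Poll the burst count field (STS bits 23:8) until it becomes
 *	nonzero.  Returns the burst count, or 0 on timeout or signal.
 */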
static int
tpm_getburst(struct tpm_softc *sc)
{
	int burst, to, rv;

	to = tpm_tmotohz(TPM_BURST_TMO);

	while (to--) {
		/*
		 * Burst count is in bits 23:8, so read the two higher bytes.
		 */
		burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1);
		burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2)
		    << 8;

		if (burst)
			return burst;

		rv = tsleep(sc, PCATCH, "tpm_getburst", 1);
		if (rv && rv != EWOULDBLOCK) {
			return 0;
		}
	}

	return 0;
}

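/*
 * tpm_status:
 *
 *	Read the status register, masked down to the status bits.
 */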
static inline uint8_t
tpm_status(struct tpm_softc *sc)
{
	return bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) &
	    TPM_STS_STATUS_BITS;
}

/* -------------------------------------------------------------------------- */

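/*
 * tpm12_suspend:
 *
 *	Tell a TPM 1.2 device to save its state with TPM_ORD_SaveState,
 *	so that the firmware can restore it on resume.
 */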
static bool
tpm12_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[10] = {
		0x00, 0xC1,		/* TPM_TAG_RQU_COMMAND */
		0x00, 0x00, 0x00, 10,	/* Length in bytes */
		0x00, 0x00, 0x00, 0x98	/* TPM_ORD_SaveState */
	};
	struct tpm_header response;
	size_t nread;
	bool endwrite = false, endread = false;
	int error;

	/*
	 * Write the command.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
	if (error) {
		device_printf(sc->sc_dev, "start write failed: %d\n", error);
		goto out;
	}

	endwrite = true;

	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
	if (error) {
		device_printf(sc->sc_dev, "write TPM_ORD_SaveState failed: %d\n",
		    error);
		goto out;
	}

	endwrite = false;

	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	if (error) {
		device_printf(sc->sc_dev, "end write failed: %d\n", error);
		goto out;
	}

	/*
	 * Read the response -- just the header; we don't expect a
	 * payload.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_READ);
	if (error) {
		device_printf(sc->sc_dev, "start read failed: %d\n", error);
		goto out;
	}

	endread = true;

	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
	    0);
	if (error) {
		device_printf(sc->sc_dev, "read failed: %d\n", error);
		goto out;
	}
	if (nread != sizeof(response)) {
		device_printf(sc->sc_dev, "short header read: %zu\n", nread);
		error = EIO;
		goto out;
	}

	endread = false;

	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
	if (error) {
		device_printf(sc->sc_dev, "end read failed: %d\n", error);
		goto out;
	}

	/*
	 * Verify the response looks reasonable.
	 */
	if (be16toh(response.tag) != TPM_TAG_RSP_COMMAND ||
	    be32toh(response.length) != sizeof(response) ||
	    be32toh(response.code) != 0) {
		device_printf(sc->sc_dev,
		    "TPM_ORD_SaveState failed: tag=0x%x length=0x%x code=0x%x\n",
		    be16toh(response.tag),
		    be32toh(response.length),
		    be32toh(response.code));
		error = EIO;
		goto out;
	}

	/* Success!  */
	error = 0;

out:	if (endwrite)
		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
	if (endread)
		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
	if (error)
		return false;
	return true;
}

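/*
 * tpm20_suspend:
 *
 *	Tell a TPM 2.0 device to save its state with
 *	TPM_CC_Shutdown(TPM_SU_STATE), so that the firmware can restore
 *	it on resume.
 */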
static bool
tpm20_suspend(struct tpm_softc *sc)
{
	static const uint8_t command[12] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS */
		0x00, 0x00, 0x00, 12,	/* Length in bytes */
		0x00, 0x00, 0x01, 0x45,	/* TPM_CC_Shutdown */
		0x00, 0x01		/* TPM_SU_STATE */
	};
	struct tpm_header response;
	size_t nread;
	bool endwrite = false, endread = false;
	int error;

	/*
	 * Write the command.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_WRITE);
	if (error) {
		device_printf(sc->sc_dev, "start write failed: %d\n", error);
		goto out;
	}

	endwrite = true;

	error = (*sc->sc_intf->write)(sc, &command, sizeof(command));
	if (error) {
		device_printf(sc->sc_dev, "write TPM_CC_Shutdown failed: %d\n",
		    error);
		goto out;
	}

	endwrite = false;

	error = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	if (error) {
		device_printf(sc->sc_dev, "end write failed: %d\n", error);
		goto out;
	}

	/*
	 * Read the response -- just the header; we don't expect a
	 * payload.
	 */
	error = (*sc->sc_intf->start)(sc, UIO_READ);
	if (error) {
		device_printf(sc->sc_dev, "start read failed: %d\n", error);
		goto out;
	}

	endread = true;

	error = (*sc->sc_intf->read)(sc, &response, sizeof(response), &nread,
	    0);
	if (error) {
		device_printf(sc->sc_dev, "read failed: %d\n", error);
		goto out;
	}
	if (nread != sizeof(response)) {
		device_printf(sc->sc_dev, "short header read: %zu\n", nread);
		error = EIO;
		goto out;
	}

	endread = false;

	error = (*sc->sc_intf->end)(sc, UIO_READ, 0);
	if (error) {
		device_printf(sc->sc_dev, "end read failed: %d\n", error);
		goto out;
	}

	/*
	 * Verify the response looks reasonable.
	 */
	if (be16toh(response.tag) != TPM2_ST_NO_SESSIONS ||
	    be32toh(response.length) != sizeof(response) ||
	    be32toh(response.code) != TPM2_RC_SUCCESS) {
		device_printf(sc->sc_dev,
		    "TPM_CC_Shutdown failed: tag=0x%x length=0x%x code=0x%x\n",
		    be16toh(response.tag),
		    be32toh(response.length),
		    be32toh(response.code));
		error = EIO;
		goto out;
	}

	/* Success!  */
	error = 0;

out:	if (endwrite)
		error = (*sc->sc_intf->end)(sc, UIO_WRITE, error);
	if (endread)
		error = (*sc->sc_intf->end)(sc, UIO_READ, error);
	if (error)
		return false;
	return true;
}

bool
tpm_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct tpm_softc *sc = device_private(dev);

	switch (sc->sc_ver) {
	case TPM_1_2:
		return tpm12_suspend(sc);
	case TPM_2_0:
		return tpm20_suspend(sc);
	default:
		panic("%s: impossible", __func__);
	}
}

bool
tpm_resume(device_t dev, const pmf_qual_t *qual)
{
	/*
	 * Don't do anything; the BIOS is supposed to restore the
	 * previously saved state.
	 */
	return true;
}

/* -------------------------------------------------------------------------- */

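/*
 * tpm_poll:
 *
 *	Poll the status register until all bits in 'mask' are set or the
 *	tick count 'to' expires.
 */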
static int
tpm_poll(struct tpm_softc *sc, uint8_t mask, int to, wchan_t chan)
{
	int rv;

	while (((sc->sc_status = tpm_status(sc)) & mask) != mask && to--) {
		rv = tsleep(chan, PCATCH, "tpm_poll", 1);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	return 0;
}

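/*
 * tpm_waitfor:
 *
 *	Wait for the status bits in 'bits', handling TPM_STS_VALID first.
 *	If the bits do not appear, retry a few times with
 *	TPM_STS_RESP_RETRY before returning EIO.
 */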
static int
tpm_waitfor(struct tpm_softc *sc, uint8_t bits, int tmo, wchan_t chan)
{
	int retry, to, rv;
	uint8_t todo;

	to = tpm_tmotohz(tmo);
	retry = 3;

restart:
	todo = bits;

	/*
	 * TPM_STS_VALID has priority over the others.
	 */
	if (todo & TPM_STS_VALID) {
		if ((rv = tpm_poll(sc, TPM_STS_VALID, to+1, chan)) != 0)
			return rv;
		todo &= ~TPM_STS_VALID;
	}

	if ((rv = tpm_poll(sc, todo, to, chan)) != 0)
		return rv;

	if ((todo & sc->sc_status) != todo) {
		if ((retry-- > 0) && (bits & TPM_STS_VALID)) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
			    TPM_STS_RESP_RETRY);
			goto restart;
		}
		return EIO;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

/*
 * TPM using the TIS 1.2 interface.
 */

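/*
 * tpm12_request_locality:
 *
 *	Request locality 'l' and wait for it to become active.  Only
 *	locality 0 is supported.
 */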
static int
tpm12_request_locality(struct tpm_softc *sc, int l)
{
	uint32_t r;
	int to, rv;

	if (l != 0)
		return EINVAL;

	if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) ==
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
		return 0;

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
	    TPM_ACCESS_REQUEST_USE);

	to = tpm_tmotohz(TPM_ACCESS_TMO);

	while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) {
		rv = tsleep(sc->sc_intf->init, PCATCH, "tpm_locality", 1);
		if (rv && rv != EWOULDBLOCK) {
			return rv;
		}
	}

	if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return EBUSY;
	}

	return 0;
}

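/*
 * tpm_tis12_probe:
 *
 *	Check for a TIS 1.2 interface: verify the capability register
 *	advertises TPM_CAPS_REQUIRED, activate locality 0, and make sure
 *	the device ID register reads back sanely.
 */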
static int
tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh)
{
	uint32_t cap;
	uint8_t reg;
	int tmo;

	cap = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITY);
	if (cap == 0xffffffff)
		return EINVAL;
	if ((cap & TPM_CAPS_REQUIRED) != TPM_CAPS_REQUIRED)
		return ENOTSUP;

	/* Request locality 0. */
	bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE);

	/* Wait for it to become active. */
	tmo = TPM_ACCESS_TMO; /* Milliseconds. */
	while ((reg = bus_space_read_1(bt, bh, TPM_ACCESS) &
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && tmo--) {
		DELAY(1000); /* 1 millisecond. */
	}
	if ((reg & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
		return ETIMEDOUT;
	}

	if (bus_space_read_4(bt, bh, TPM_ID) == 0xffffffff)
		return EINVAL;

	return 0;
}

static int
tpm12_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * TPM Specification Version 1.2, Main Part 3: Commands,
	 * Sec. 13.6 TPM_GetRandom
	 */
	struct {
		struct tpm_header hdr;
		uint32_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint32_t randomBytesSize;
		uint8_t	bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command.  */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM_TAG_RQU_COMMAND);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM_ORD_GetRandom);
	command.bytesRequested = htobe32(sizeof(response.bytes));

	/* Write the command.  */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header.  */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
		    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible.  */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
		    nread, sizeof(response.hdr));
		rv = EIO;
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM_TAG_RSP_COMMAND ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later.  Otherwise, give up.
		 */
		if (code & TPM_NON_FATAL) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy, code=%u\n",
			    __func__, code & ~TPM_NON_FATAL);
			rv = 0;
		} else if (code == TPM_DEACTIVATED) {
			device_printf(sc->sc_dev, "tpm is deactivated\n");
			rv = ENXIO;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=%u\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload.  */
	if ((rv = (*sc->sc_intf->read)(sc,
		    (char *)&response + nread, pktlen - nread,
		    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible.  */
	nbytes = be32toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool.  Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing.  */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

static int
tpm20_rng(struct tpm_softc *sc, unsigned *entropybitsp)
{
	/*
	 * Trusted Platform Module Library, Family "2.0", Level 00
	 * Revision 01.38, Part 3: Commands, Sec. 16.1 `TPM2_GetRandom'
	 *
	 * https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-3-Commands-01.38.pdf#page=133
	 */
	struct {
		struct tpm_header hdr;
		uint16_t bytesRequested;
	} __packed command;
	struct response {
		struct tpm_header hdr;
		uint16_t randomBytesSize;
		uint8_t bytes[64];
	} __packed response;
	bool endwrite = false, endread = false;
	size_t nread;
	uint16_t tag;
	uint32_t pktlen, code, nbytes, entropybits = 0;
	int rv;

	/* Encode the command.  */
	memset(&command, 0, sizeof(command));
	command.hdr.tag = htobe16(TPM2_ST_NO_SESSIONS);
	command.hdr.length = htobe32(sizeof(command));
	command.hdr.code = htobe32(TPM2_CC_GetRandom);
	command.bytesRequested = htobe16(sizeof(response.bytes));

	/* Write the command.  */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
		device_printf(sc->sc_dev, "start write failed, error=%d\n",
		    rv);
		goto out;
	}
	endwrite = true;
	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
		goto out;
	}
	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
	endwrite = false;
	if (rv) {
		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
		goto out;
	}

	/* Read the response header.  */
	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
		device_printf(sc->sc_dev, "start read failed, error=%d\n",
		    rv);
		goto out;
	}
	endread = true;
	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
		    &nread, 0)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the response header looks sensible.  */
	if (nread != sizeof(response.hdr)) {
		device_printf(sc->sc_dev, "read %zu bytes, expected %zu\n",
		    nread, sizeof(response.hdr));
		rv = EIO;
		goto out;
	}
	tag = be16toh(response.hdr.tag);
	pktlen = be32toh(response.hdr.length);
	code = be32toh(response.hdr.code);
	if (tag != TPM2_ST_NO_SESSIONS ||
	    pktlen < offsetof(struct response, bytes) ||
	    pktlen > sizeof(response) ||
	    code != 0) {
		/*
		 * If the tpm itself is busy (e.g., it has yet to run a
		 * self-test, or it's in a timeout period to defend
		 * against brute force attacks), then we can try again
		 * later.  Otherwise, give up.
		 */
		if (code & TPM2_RC_WARN) {
			aprint_debug_dev(sc->sc_dev, "%s: tpm busy,"
			    " code=TPM_RC_WARN+0x%x\n",
			    __func__, code & ~TPM2_RC_WARN);
			rv = 0;
		} else {
			device_printf(sc->sc_dev, "bad tpm response:"
			    " tag=%u len=%u code=0x%x\n", tag, pktlen, code);
			hexdump(aprint_debug, "tpm response header",
			    (const void *)&response.hdr,
			    sizeof(response.hdr));
			rv = EIO;
		}
		goto out;
	}

	/* Read the response payload.  */
	if ((rv = (*sc->sc_intf->read)(sc,
		    (char *)&response + nread, pktlen - nread,
		    NULL, TPM_PARAM_SIZE)) != 0) {
		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
		goto out;
	}
	endread = false;
	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
		goto out;
	}

	/* Verify the number of bytes read looks sensible.  */
	nbytes = be16toh(response.randomBytesSize);
	if (nbytes > pktlen - offsetof(struct response, bytes)) {
		device_printf(sc->sc_dev, "overlong GetRandom length:"
		    " %u, max %zu\n",
		    nbytes, pktlen - offsetof(struct response, bytes));
		nbytes = pktlen - offsetof(struct response, bytes);
	}

	/*
	 * Enter the data into the entropy pool.  Conservatively (or,
	 * perhaps, cargocultily) estimate half a bit of entropy per
	 * bit of data.
	 */
	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
	entropybits = (NBBY/2)*nbytes;
	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);

out:	/* End the read or write if still ongoing.  */
	if (endread)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
	if (endwrite)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	*entropybitsp = entropybits;
	return rv;
}

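/*
 * tpm_rng_work:
 *
 *	Workqueue handler: issue GetRandom commands until the number of
 *	bytes requested by rnd(9) has been delivered or an error occurs.
 */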
static void
tpm_rng_work(struct work *wk, void *cookie)
{
	struct tpm_softc *sc = cookie;
	unsigned nbytes, entropybits;
	int rv;

	/* Acknowledge the request.  */
	nbytes = atomic_swap_uint(&sc->sc_rndpending, 0);

	/* Lock the tpm while we do I/O transactions with it.  */
	mutex_enter(&sc->sc_lock);

	/*
	 * Issue as many commands as needed to fulfill the request, but
	 * stop if anything fails.
	 */
	for (; nbytes; nbytes -= MIN(nbytes, MAX(1, entropybits/NBBY))) {
		switch (sc->sc_ver) {
		case TPM_1_2:
			rv = tpm12_rng(sc, &entropybits);
			break;
		case TPM_2_0:
			rv = tpm20_rng(sc, &entropybits);
			break;
		default:
			panic("bad tpm version: %d", sc->sc_ver);
		}
		if (rv)
			break;
	}

	/*
	 * If the tpm is busted, no sense in trying again -- most
	 * likely, it is deactivated, and by the spec it cannot be
	 * reactivated until after a reboot.
	 */
	if (rv) {
		device_printf(sc->sc_dev, "deactivating entropy source\n");
		atomic_store_relaxed(&sc->sc_rnddisabled, true);
		/* XXX worker thread can't workqueue_destroy its own queue */
	}

	/* Relinquish the tpm.  */
	mutex_exit(&sc->sc_lock);
}

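/*
 * tpm_rng_get:
 *
 *	rndsource(9) callback: record how many bytes are wanted and
 *	schedule the worker, unless the entropy source is disabled.
 */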
static void
tpm_rng_get(size_t nbytes, void *cookie)
{
	struct tpm_softc *sc = cookie;

	if (atomic_load_relaxed(&sc->sc_rnddisabled))
		return;		/* tough */
	if (atomic_swap_uint(&sc->sc_rndpending, MIN(nbytes, UINT_MAX/NBBY))
	    == 0)
		workqueue_enqueue(sc->sc_rndwq, &sc->sc_rndwk, NULL);
}

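/*
 * tpm_tis12_init:
 *
 *	Identify the device, claim locality 0, abort any command in
 *	progress, and attach the random number source.
 */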
static int
tpm_tis12_init(struct tpm_softc *sc)
{
	int rv;

	sc->sc_caps = bus_space_read_4(sc->sc_bt, sc->sc_bh,
	    TPM_INTF_CAPABILITY);
	sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID);
	sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV);

	aprint_normal_dev(sc->sc_dev, "device 0x%08x rev 0x%x\n",
	    sc->sc_devid, sc->sc_rev);

	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	/* Abort whatever it thought it was doing. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);

	/* XXX Run this at higher priority?  */
	if ((rv = workqueue_create(&sc->sc_rndwq, device_xname(sc->sc_dev),
		    tpm_rng_work, sc, PRI_NONE, IPL_VM, WQ_MPSAFE)) != 0)
		return rv;
	rndsource_setcb(&sc->sc_rnd, tpm_rng_get, sc);
	rnd_attach_source(&sc->sc_rnd, device_xname(sc->sc_dev),
	    RND_TYPE_RNG,
	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|RND_FLAG_HASCB);

	return 0;
}

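/*
 * tpm_tis12_start:
 *
 *	Begin a transfer.  For a read, wait for data to become available;
 *	for a write, claim locality 0 and put the device in the
 *	command-ready state.
 */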
static int
tpm_tis12_start(struct tpm_softc *sc, int rw)
{
	int rv;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		return rv;
	}

	/* Request the 0th locality. */
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	sc->sc_status = tpm_status(sc);
	if (sc->sc_status & TPM_STS_CMD_READY)
		return 0;

	/* Abort previous and restart. */
	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
	rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO,
	    sc->sc_intf->write);
	if (rv)
		return rv;

	return 0;
}

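/*
 * tpm_tis12_read:
 *
 *	Read up to 'len' bytes from the data FIFO in bursts, waiting for
 *	data before each burst.  Unless TPM_PARAM_SIZE is set, stop once
 *	at least 6 bytes have been read.
 */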
static int
tpm_tis12_read(struct tpm_softc *sc, void *buf, size_t len, size_t *count,
    int flags)
{
	uint8_t *p = buf;
	size_t cnt;
	int rv, n;

	cnt = 0;
	while (len > 0) {
		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		    TPM_READ_TMO, sc->sc_intf->read);
		if (rv)
			return rv;

		n = MIN(len, tpm_getburst(sc));
		while (n > 0) {
			*p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_DATA);
			cnt++;
			len--;
			n--;
		}

		if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6)
			break;
	}

	if (count)
		*count = cnt;

	return 0;
}

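/*
 * tpm_tis12_write:
 *
 *	Write 'len' bytes to the data FIFO in bursts, writing the final
 *	byte separately so that TPM_STS_DATA_EXPECT can be checked before
 *	and after it.
 */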
static int
tpm_tis12_write(struct tpm_softc *sc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	size_t cnt;
	int rv, r;

	if (len == 0)
		return 0;
	if ((rv = tpm12_request_locality(sc, 0)) != 0)
		return rv;

	cnt = 0;
	while (cnt < len - 1) {
		for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) {
			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
			cnt++;
		}
		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
			return rv;
		}
		sc->sc_status = tpm_status(sc);
		if (!(sc->sc_status & TPM_STS_DATA_EXPECT)) {
			return EIO;
		}
	}

	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
	cnt++;

	if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
		return rv;
	}
	if ((sc->sc_status & TPM_STS_DATA_EXPECT) != 0) {
		return EIO;
	}

	return 0;
}

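/*
 * tpm_tis12_end:
 *
 *	Finish a transfer.  For a read, check that no data is left over
 *	and release locality 0; for a write, either start command
 *	execution with TPM_STS_GO or abort on error.
 */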
static int
tpm_tis12_end(struct tpm_softc *sc, int rw, int err)
{
	int rv = 0;

	if (rw == UIO_READ) {
		rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO,
		    sc->sc_intf->read);
		if (rv)
			goto out;

		/* Still more data? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_AVAIL)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    TPM_STS_CMD_READY);

		/* Release the 0th locality. */
		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
		    TPM_ACCESS_ACTIVE_LOCALITY);
	} else {
		/* Hungry for more? */
		sc->sc_status = tpm_status(sc);
		if (!err && (sc->sc_status & TPM_STS_DATA_EXPECT)) {
			rv = EIO;
		}

		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
		    err ? TPM_STS_CMD_READY : TPM_STS_GO);
	}

out:	return err ? err : rv;
}

const struct tpm_intf tpm_intf_tis12 = {
	.version = TIS_1_2,
	.probe = tpm_tis12_probe,
	.init = tpm_tis12_init,
	.start = tpm_tis12_start,
	.read = tpm_tis12_read,
	.write = tpm_tis12_write,
	.end = tpm_tis12_end
};

/* -------------------------------------------------------------------------- */

static dev_type_open(tpmopen);
static dev_type_close(tpmclose);
static dev_type_read(tpmread);
static dev_type_write(tpmwrite);
static dev_type_ioctl(tpmioctl);

const struct cdevsw tpm_cdevsw = {
	.d_open = tpmopen,
	.d_close = tpmclose,
	.d_read = tpmread,
	.d_write = tpmwrite,
	.d_ioctl = tpmioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};

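/*
 * tpmopen, tpmclose:
 *
 *	Allow only one open of the character device at a time, tracked
 *	with the sc_busy flag.
 */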
static int
tpmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (sc->sc_busy) {
		ret = EBUSY;
	} else {
		sc->sc_busy = true;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

static int
tpmclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	int ret = 0;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);
	if (!sc->sc_busy) {
		ret = EINVAL;
	} else {
		sc->sc_busy = false;
	}
	mutex_exit(&sc->sc_lock);

	return ret;
}

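/*
 * tpmread:
 *
 *	Read one complete response: fetch the header to learn the length,
 *	read the payload, then copy both out to the caller.
 */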
static int
tpmread(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_header hdr;
	uint8_t buf[TPM_BUFSIZ];
	size_t cnt, len = 0/*XXXGCC*/;
	bool end = false;
	int rv;

	if (sc == NULL)
		return ENXIO;

	mutex_enter(&sc->sc_lock);

	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)))
		goto out;
	end = true;

	/* Get the header. */
	if ((rv = (*sc->sc_intf->read)(sc, &hdr, sizeof(hdr), &cnt, 0))) {
		goto out;
	}
	if (cnt != sizeof(hdr)) {
		rv = EIO;
		goto out;
	}
	len = be32toh(hdr.length);
	if (len > MIN(sizeof(buf), uio->uio_resid) || len < sizeof(hdr)) {
		rv = EIO;
		goto out;
	}

	/* Get the payload. */
	len -= sizeof(hdr);
	if ((rv = (*sc->sc_intf->read)(sc, buf, len, NULL, TPM_PARAM_SIZE))) {
		goto out;
	}

out:	if (end)
		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);

	mutex_exit(&sc->sc_lock);

	/* If anything went wrong, stop here -- nothing to copy out. */
	if (rv)
		return rv;

	/* Copy out the header. */
	if ((rv = uiomove(&hdr, sizeof(hdr), uio))) {
		return rv;
	}

	/* Copy out the payload.  */
	if ((rv = uiomove(buf, len, uio))) {
		return rv;
	}

	/* Success! */
	return 0;
}

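/*
 * tpmwrite:
 *
 *	Copy in a command from the caller and submit it to the interface
 *	in a single write transaction.
 */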
static int
tpmwrite(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	uint8_t buf[TPM_BUFSIZ];
	bool end = false;
	int n, rv;

	if (sc == NULL)
		return ENXIO;

	n = MIN(sizeof(buf), uio->uio_resid);
	if ((rv = uiomove(buf, n, uio))) {
		return rv;
	}

	mutex_enter(&sc->sc_lock);

	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE))) {
		goto out;
	}
	end = true;

	if ((rv = (*sc->sc_intf->write)(sc, buf, n))) {
		goto out;
	}

out:	if (end)
		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);

	mutex_exit(&sc->sc_lock);
	return rv;
}

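/*
 * tpmioctl:
 *
 *	TPM_IOC_GETINFO reports the API, TPM and interface versions along
 *	with the device ID, revision and capabilities.
 */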
static int
tpmioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
	struct tpm_ioc_getinfo *info;

	if (sc == NULL)
		return ENXIO;

	switch (cmd) {
	case TPM_IOC_GETINFO:
		info = addr;
		info->api_version = TPM_API_VERSION;
		info->tpm_version = sc->sc_ver;
		info->itf_version = sc->sc_intf->version;
		info->device_id = sc->sc_devid;
		info->device_rev = sc->sc_rev;
		info->device_caps = sc->sc_caps;
		return 0;
	default:
		break;
	}

	return ENOTTY;
}