1 /*	$NetBSD: tpm.c,v 1.23 2021/12/20 23:05:55 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Maxime Villard.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 2008, 2009 Michael Shalayeff
34  * Copyright (c) 2009, 2010 Hans-Joerg Hoexer
35  * All rights reserved.
36  *
37  * Permission to use, copy, modify, and distribute this software for any
38  * purpose with or without fee is hereby granted, provided that the above
39  * copyright notice and this permission notice appear in all copies.
40  *
41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45  * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
46  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
47  * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48  */
49 
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: tpm.c,v 1.23 2021/12/20 23:05:55 riastradh Exp $");
52 
53 #include <sys/param.h>
54 #include <sys/types.h>
55 
56 #include <sys/atomic.h>
57 #include <sys/bus.h>
58 #include <sys/conf.h>
59 #include <sys/device.h>
60 #include <sys/kernel.h>
61 #include <sys/malloc.h>
62 #include <sys/pmf.h>
63 #include <sys/proc.h>
64 #include <sys/systm.h>
65 #include <sys/workqueue.h>
66 
67 #include <dev/ic/tpmreg.h>
68 #include <dev/ic/tpmvar.h>
69 
70 #include "ioconf.h"
71 
72 CTASSERT(sizeof(struct tpm_header) == 10);
73 
74 #define TPM_BUFSIZ	1024
75 
76 #define TPM_PARAM_SIZE	0x0001	/* a flag, not a size */
77 
78 /* Timeouts. */
79 #define TPM_ACCESS_TMO	2000	/* 2sec */
80 #define TPM_READY_TMO	2000	/* 2sec */
81 #define TPM_READ_TMO	2000	/* 2sec */
82 #define TPM_BURST_TMO	2000	/* 2sec */
83 
84 #define TPM_CAPS_REQUIRED \
85 	(TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT| \
86 	 TPM_INTF_INT_LEVEL_LOW)
87 
88 static inline int
89 tpm_tmotohz(int tmo)
90 {
91 	struct timeval tv;
92 
93 	tv.tv_sec = tmo / 1000;
94 	tv.tv_usec = 1000 * (tmo % 1000);
95 
96 	return tvtohz(&tv);
97 }
98 
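/*
 * Wait for the TPM to advertise a non-zero burst count (TPM_STS bits
 * 23:8), i.e. how many bytes it will transfer before inserting wait
 * states.  Returns 0 on timeout or if the sleep is interrupted.
 */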
99 static int
100 tpm_getburst(struct tpm_softc *sc)
101 {
102 	int burst, to, rv;
103 
104 	to = tpm_tmotohz(TPM_BURST_TMO);
105 
106 	while (to--) {
107 		/*
108 		 * Burst count is in bits 23:8, so read the two higher bytes.
109 		 */
110 		burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1);
111 		burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2)
112 		    << 8;
113 
114 		if (burst)
115 			return burst;
116 
117 		rv = tsleep(sc, PCATCH, "tpm_getburst", 1);
118 		if (rv && rv != EWOULDBLOCK) {
119 			return 0;
120 		}
121 	}
122 
123 	return 0;
124 }
125 
126 static inline uint8_t
127 tpm_status(struct tpm_softc *sc)
128 {
129 	return bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) &
130 	    TPM_STS_STATUS_BITS;
131 }
132 
133 /* -------------------------------------------------------------------------- */
134 
135 static bool
136 tpm12_suspend(struct tpm_softc *sc)
137 {
138 	static const uint8_t command[10] = {
139 		0x00, 0xC1,		/* TPM_TAG_RQU_COMMAND */
140 		0x00, 0x00, 0x00, 10,	/* Length in bytes */
141 		0x00, 0x00, 0x00, 0x98	/* TPM_ORD_SaveState */
142 	};
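	/*
	 * The command is a raw struct tpm_header: big-endian tag, length
	 * and ordinal, with no payload.  A response code of 0 means the
	 * state was saved.
	 */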
143 	struct tpm_header response;
144 
145 	if ((*sc->sc_intf->write)(sc, &command, sizeof(command)) != 0)
146 		return false;
147 	if ((*sc->sc_intf->read)(sc, &response, sizeof(response), NULL, 0) != 0)
148 		return false;
149 	if (TPM_BE32(response.code) != 0)
150 		return false;
151 
152 	return true;
153 }
154 
155 static bool
156 tpm20_suspend(struct tpm_softc *sc)
157 {
158 	static const uint8_t command[12] = {
159 		0x80, 0x01,		/* TPM_ST_NO_SESSIONS */
160 		0x00, 0x00, 0x00, 12,	/* Length in bytes */
161 		0x00, 0x00, 0x01, 0x45,	/* TPM_CC_Shutdown */
162 		0x00, 0x01		/* TPM_SU_STATE */
163 	};
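	/*
	 * Same layout as the 1.2 command plus a 16-bit shutdownType
	 * parameter; TPM_SU_STATE asks for a resumable shutdown.
	 */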
164 	struct tpm_header response;
165 
166 	if ((*sc->sc_intf->write)(sc, &command, sizeof(command)) != 0)
167 		return false;
168 	if ((*sc->sc_intf->read)(sc, &response, sizeof(response), NULL, 0) != 0)
169 		return false;
170 	if (TPM_BE32(response.code) != 0)
171 		return false;
172 
173 	return true;
174 }
175 
176 bool
177 tpm_suspend(device_t dev, const pmf_qual_t *qual)
178 {
179 	struct tpm_softc *sc = device_private(dev);
180 
181 	switch (sc->sc_ver) {
182 	case TPM_1_2:
183 		return tpm12_suspend(sc);
184 	case TPM_2_0:
185 		return tpm20_suspend(sc);
186 	default:
187 		panic("%s: impossible", __func__);
188 	}
189 }
190 
191 bool
192 tpm_resume(device_t dev, const pmf_qual_t *qual)
193 {
194 	/*
195 	 * Don't do anything, the BIOS is supposed to restore the previously
196 	 * saved state.
197 	 */
198 	return true;
199 }
200 
201 /* -------------------------------------------------------------------------- */
202 
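/*
 * Poll the status register until all bits in "mask" are set, sleeping
 * one tick at a time for at most "to" ticks.  Returns 0 on success or
 * timeout, or an error if the sleep is interrupted.
 */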
203 static int
204 tpm_poll(struct tpm_softc *sc, uint8_t mask, int to, wchan_t chan)
205 {
206 	int rv;
207 
208 	while (((sc->sc_status = tpm_status(sc)) & mask) != mask && to--) {
209 		rv = tsleep(chan, PCATCH, "tpm_poll", 1);
210 		if (rv && rv != EWOULDBLOCK) {
211 			return rv;
212 		}
213 	}
214 
215 	return 0;
216 }
217 
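/*
 * Wait for all of "bits" to become set in the status register.
 * TPM_STS_VALID is awaited first; if the remaining bits then fail to
 * appear, ask the TPM to retransmit its response (TPM_STS_RESP_RETRY)
 * and try again, up to three times.
 */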
218 static int
219 tpm_waitfor(struct tpm_softc *sc, uint8_t bits, int tmo, wchan_t chan)
220 {
221 	int retry, to, rv;
222 	uint8_t todo;
223 
224 	to = tpm_tmotohz(tmo);
225 	retry = 3;
226 
227 restart:
228 	todo = bits;
229 
230 	/*
231 	 * TPM_STS_VALID has priority over the others.
232 	 */
233 	if (todo & TPM_STS_VALID) {
234 		if ((rv = tpm_poll(sc, TPM_STS_VALID, to+1, chan)) != 0)
235 			return rv;
236 		todo &= ~TPM_STS_VALID;
237 	}
238 
239 	if ((rv = tpm_poll(sc, todo, to, chan)) != 0)
240 		return rv;
241 
242 	if ((todo & sc->sc_status) != todo) {
243 		if ((retry-- > 0) && (bits & TPM_STS_VALID)) {
244 			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
245 			    TPM_STS_RESP_RETRY);
246 			goto restart;
247 		}
248 		return EIO;
249 	}
250 
251 	return 0;
252 }
253 
254 /* -------------------------------------------------------------------------- */
255 
256 /*
257  * TPM using the TIS 1.2 interface.
258  */
259 
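/*
 * Request exclusive use of a locality through the TPM_ACCESS register
 * and wait for the TPM to report it both valid and active.  Only
 * locality 0 is supported here.
 */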
260 static int
261 tpm12_request_locality(struct tpm_softc *sc, int l)
262 {
263 	uint32_t r;
264 	int to, rv;
265 
266 	if (l != 0)
267 		return EINVAL;
268 
269 	if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
270 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) ==
271 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
272 		return 0;
273 
274 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
275 	    TPM_ACCESS_REQUEST_USE);
276 
277 	to = tpm_tmotohz(TPM_ACCESS_TMO);
278 
279 	while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) &
280 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
281 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) {
282 		rv = tsleep(sc->sc_intf->init, PCATCH, "tpm_locality", 1);
283 		if (rv && rv != EWOULDBLOCK) {
284 			return rv;
285 		}
286 	}
287 
288 	if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
289 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
290 		return EBUSY;
291 	}
292 
293 	return 0;
294 }
295 
296 static int
297 tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh)
298 {
299 	uint32_t cap;
300 	uint8_t reg;
301 	int tmo;
302 
303 	cap = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITY);
304 	if (cap == 0xffffffff)
305 		return EINVAL;
306 	if ((cap & TPM_CAPS_REQUIRED) != TPM_CAPS_REQUIRED)
307 		return ENOTSUP;
308 
309 	/* Request locality 0. */
310 	bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE);
311 
312 	/* Wait for it to become active. */
313 	tmo = TPM_ACCESS_TMO; /* Milliseconds. */
314 	while ((reg = bus_space_read_1(bt, bh, TPM_ACCESS) &
315 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
316 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && tmo--) {
317 		DELAY(1000); /* 1 millisecond. */
318 	}
319 	if ((reg & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) !=
320 	    (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
321 		return ETIMEDOUT;
322 	}
323 
324 	if (bus_space_read_4(bt, bh, TPM_ID) == 0xffffffff)
325 		return EINVAL;
326 
327 	return 0;
328 }
329 
330 static int
331 tpm12_rng(struct tpm_softc *sc, unsigned *entropybitsp)
332 {
333 	/*
334 	 * TPM Specification Version 1.2, Main Part 3: Commands,
335 	 * Sec. 13.6 TPM_GetRandom
336 	 */
337 	struct {
338 		struct tpm_header hdr;
339 		uint32_t bytesRequested;
340 	} __packed command;
341 	struct response {
342 		struct tpm_header hdr;
343 		uint32_t randomBytesSize;
344 		uint8_t	bytes[64];
345 	} __packed response;
346 	bool endwrite = false, endread = false;
347 	size_t nread;
348 	uint16_t tag;
349 	uint32_t pktlen, code, nbytes, entropybits = 0;
350 	int rv;
351 
352 	/* Encode the command.  */
353 	memset(&command, 0, sizeof(command));
354 	command.hdr.tag = htobe16(TPM_TAG_RQU_COMMAND);
355 	command.hdr.length = htobe32(sizeof(command));
356 	command.hdr.code = htobe32(TPM_ORD_GetRandom);
357 	command.bytesRequested = htobe32(sizeof(response.bytes));
358 
359 	/* Write the command.   */
360 	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
361 		device_printf(sc->sc_dev, "start write failed, error=%d\n",
362 		    rv);
363 		goto out;
364 	}
365 	endwrite = true;
366 	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
367 		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
368 		goto out;
369 	}
370 	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
371 	endwrite = false;
372 	if (rv) {
373 		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
374 		goto out;
375 	}
376 
377 	/* Read the response header.  */
378 	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
379 		device_printf(sc->sc_dev, "start write failed, error=%d\n",
380 		    rv);
381 		goto out;
382 	}
383 	endread = true;
384 	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
385 		    &nread, 0)) != 0) {
386 		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
387 		goto out;
388 	}
389 
390 	/* Verify the response header looks sensible.  */
391 	if (nread != sizeof(response.hdr)) {
392 		device_printf(sc->sc_dev, "read %zu bytes, expected %zu",
393 		    nread, sizeof(response.hdr));
394 		goto out;
395 	}
396 	tag = be16toh(response.hdr.tag);
397 	pktlen = be32toh(response.hdr.length);
398 	code = be32toh(response.hdr.code);
399 	if (tag != TPM_TAG_RSP_COMMAND ||
400 	    pktlen < offsetof(struct response, bytes) ||
401 	    pktlen > sizeof(response) ||
402 	    code != 0) {
403 		/*
404 		 * If the tpm itself is busy (e.g., it has yet to run a
405 		 * self-test, or it's in a timeout period to defend
406 		 * against brute force attacks), then we can try again
407 		 * later.  Otherwise, give up.
408 		 */
409 		if (code & TPM_NON_FATAL) {
410 			aprint_debug_dev(sc->sc_dev, "%s: tpm busy, code=%u\n",
411 			    __func__, code & ~TPM_NON_FATAL);
412 			rv = 0;
413 		} else if (code == TPM_DEACTIVATED) {
414 			device_printf(sc->sc_dev, "tpm is deactivated\n");
415 			rv = ENXIO;
416 		} else {
417 			device_printf(sc->sc_dev, "bad tpm response:"
418 			    " tag=%u len=%u code=%u\n", tag, pktlen, code);
419 			hexdump(aprint_debug, "tpm response header",
420 			    (const void *)&response.hdr,
421 			    sizeof(response.hdr));
422 			rv = EIO;
423 		}
424 		goto out;
425 	}
426 
427 	/* Read the response payload.  */
428 	if ((rv = (*sc->sc_intf->read)(sc,
429 		    (char *)&response + nread, pktlen - nread,
430 		    NULL, TPM_PARAM_SIZE)) != 0) {
431 		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
432 		goto out;
433 	}
434 	endread = false;
435 	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
436 		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
437 		goto out;
438 	}
439 
440 	/* Verify the number of bytes read looks sensible.  */
441 	nbytes = be32toh(response.randomBytesSize);
442 	if (nbytes > pktlen - offsetof(struct response, bytes)) {
443 		device_printf(sc->sc_dev, "overlong GetRandom length:"
444 		    " %u, max %zu\n",
445 		    nbytes, pktlen - offsetof(struct response, bytes));
446 		nbytes = pktlen - offsetof(struct response, bytes);
447 	}
448 
449 	/*
450 	 * Enter the data into the entropy pool.  Conservatively (or,
451 	 * perhaps, cargocultily) estimate half a bit of entropy per
452 	 * bit of data.
453 	 */
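	/* E.g. 64 bytes of data = 512 bits, credited as 256 bits. */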
454 	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
455 	entropybits = (NBBY/2)*nbytes;
456 	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);
457 
458 out:	/* End the read or write if still ongoing.  */
459 	if (endread)
460 		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
461 	if (endwrite)
462 		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);
463 
464 	*entropybitsp = entropybits;
465 	return rv;
466 }
467 
468 static int
469 tpm20_rng(struct tpm_softc *sc, unsigned *entropybitsp)
470 {
471 	/*
472 	 * Trusted Platform Module Library, Family "2.0", Level 00
473 	 * Revision 01.38, Part 3: Commands, Sec. 16.1 `TPM2_GetRandom'
474 	 *
475 	 * https://trustedcomputinggroup.org/wp-content/uploads/TPM-Rev-2.0-Part-3-Commands-01.38.pdf#page=133
476 	 */
477 	struct {
478 		struct tpm_header hdr;
479 		uint16_t bytesRequested;
480 	} __packed command;
481 	struct response {
482 		struct tpm_header hdr;
483 		uint16_t randomBytesSize;
484 		uint8_t bytes[64];
485 	} __packed response;
486 	bool endwrite = false, endread = false;
487 	size_t nread;
488 	uint16_t tag;
489 	uint32_t pktlen, code, nbytes, entropybits = 0;
490 	int rv;
491 
492 	/* Encode the command.  */
493 	memset(&command, 0, sizeof(command));
494 	command.hdr.tag = htobe16(TPM2_ST_NO_SESSIONS);
495 	command.hdr.length = htobe32(sizeof(command));
496 	command.hdr.code = htobe32(TPM2_CC_GetRandom);
497 	command.bytesRequested = htobe16(sizeof(response.bytes));
498 
499 	/* Write the command.   */
500 	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE)) != 0) {
501 		device_printf(sc->sc_dev, "start write failed, error=%d\n",
502 		    rv);
503 		goto out;
504 	}
505 	endwrite = true;
506 	if ((rv = (*sc->sc_intf->write)(sc, &command, sizeof(command))) != 0) {
507 		device_printf(sc->sc_dev, "write failed, error=%d\n", rv);
508 		goto out;
509 	}
510 	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, 0);
511 	endwrite = false;
512 	if (rv) {
513 		device_printf(sc->sc_dev, "end write failed, error=%d\n", rv);
514 		goto out;
515 	}
516 
517 	/* Read the response header.  */
518 	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)) != 0) {
519 		device_printf(sc->sc_dev, "start write failed, error=%d\n",
520 		    rv);
521 		goto out;
522 	}
523 	endread = true;
524 	if ((rv = (*sc->sc_intf->read)(sc, &response.hdr, sizeof(response.hdr),
525 		    &nread, 0)) != 0) {
526 		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
527 		goto out;
528 	}
529 
530 	/* Verify the response header looks sensible.  */
531 	if (nread != sizeof(response.hdr)) {
532 		device_printf(sc->sc_dev, "read %zu bytes, expected %zu",
533 		    nread, sizeof(response.hdr));
534 		goto out;
535 	}
536 	tag = be16toh(response.hdr.tag);
537 	pktlen = be32toh(response.hdr.length);
538 	code = be32toh(response.hdr.code);
539 	if (tag != TPM2_ST_NO_SESSIONS ||
540 	    pktlen < offsetof(struct response, bytes) ||
541 	    pktlen > sizeof(response) ||
542 	    code != 0) {
543 		/*
544 		 * If the tpm itself is busy (e.g., it has yet to run a
545 		 * self-test, or it's in a timeout period to defend
546 		 * against brute force attacks), then we can try again
547 		 * later.  Otherwise, give up.
548 		 */
549 		if (code & TPM2_RC_WARN) {
550 			aprint_debug_dev(sc->sc_dev, "%s: tpm busy,"
551 			    " code=TPM_RC_WARN+0x%x\n",
552 			    __func__, code & ~TPM2_RC_WARN);
553 			rv = 0;
554 		} else {
555 			device_printf(sc->sc_dev, "bad tpm response:"
556 			    " tag=%u len=%u code=0x%x\n", tag, pktlen, code);
557 			hexdump(aprint_debug, "tpm response header",
558 			    (const void *)&response.hdr,
559 			    sizeof(response.hdr));
560 			rv = EIO;
561 		}
562 		goto out;
563 	}
564 
565 	/* Read the response payload.  */
566 	if ((rv = (*sc->sc_intf->read)(sc,
567 		    (char *)&response + nread, pktlen - nread,
568 		    NULL, TPM_PARAM_SIZE)) != 0) {
569 		device_printf(sc->sc_dev, "read failed, error=%d\n", rv);
570 		goto out;
571 	}
572 	endread = false;
573 	if ((rv = (*sc->sc_intf->end)(sc, UIO_READ, 0)) != 0) {
574 		device_printf(sc->sc_dev, "end read failed, error=%d\n", rv);
575 		goto out;
576 	}
577 
578 	/* Verify the number of bytes read looks sensible.  */
579 	nbytes = be16toh(response.randomBytesSize);
580 	if (nbytes > pktlen - offsetof(struct response, bytes)) {
581 		device_printf(sc->sc_dev, "overlong GetRandom length:"
582 		    " %u, max %zu\n",
583 		    nbytes, pktlen - offsetof(struct response, bytes));
584 		nbytes = pktlen - offsetof(struct response, bytes);
585 	}
586 
587 	/*
588 	 * Enter the data into the entropy pool.  Conservatively (or,
589 	 * perhaps, cargocultily) estimate half a bit of entropy per
590 	 * bit of data.
591 	 */
592 	CTASSERT(sizeof(response.bytes) <= UINT_MAX/(NBBY/2));
593 	entropybits = (NBBY/2)*nbytes;
594 	rnd_add_data(&sc->sc_rnd, response.bytes, nbytes, entropybits);
595 
596 out:	/* End the read or write if still ongoing.  */
597 	if (endread)
598 		rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
599 	if (endwrite)
600 		rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);
601 
602 	*entropybitsp = entropybits;
603 	return rv;
604 }
605 
606 static void
607 tpm_rng_work(struct work *wk, void *cookie)
608 {
609 	struct tpm_softc *sc = cookie;
610 	unsigned nbytes, entropybits;
611 	bool busy;
612 	int rv;
613 
614 	/* Acknowledge the request.  */
615 	nbytes = atomic_swap_uint(&sc->sc_rndpending, 0);
616 
617 	/* Lock userland out of the tpm, or fail if it's already open.  */
618 	mutex_enter(&sc->sc_lock);
619 	busy = sc->sc_busy;
620 	sc->sc_busy = true;
621 	mutex_exit(&sc->sc_lock);
622 	if (busy) {		/* tough */
623 		aprint_debug_dev(sc->sc_dev, "%s: device in use\n", __func__);
624 		return;
625 	}
626 
627 	/*
628 	 * Issue as many commands as needed to fulfill the request, but
629 	 * stop if anything fails.
630 	 */
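	/*
	 * Each pass credits entropybits of entropy; debit the request by
	 * the corresponding number of bytes, but always by at least one
	 * byte so a zero-entropy pass cannot spin forever.
	 */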
631 	for (; nbytes; nbytes -= MIN(nbytes, MAX(1, entropybits/NBBY))) {
632 		switch (sc->sc_ver) {
633 		case TPM_1_2:
634 			rv = tpm12_rng(sc, &entropybits);
635 			break;
636 		case TPM_2_0:
637 			rv = tpm20_rng(sc, &entropybits);
638 			break;
639 		default:
640 			panic("bad tpm version: %d", sc->sc_ver);
641 		}
642 		if (rv)
643 			break;
644 	}
645 
646 	/*
647 	 * If the tpm is busted, no sense in trying again -- most
648 	 * likely, it is deactivated, and by the spec it cannot be
649 	 * reactivated until after a reboot.
650 	 */
651 	if (rv) {
652 		device_printf(sc->sc_dev, "deactivating entropy source\n");
653 		atomic_store_relaxed(&sc->sc_rnddisabled, true);
654 		/* XXX worker thread can't workqueue_destroy its own queue */
655 	}
656 
657 	/* Relinquish the tpm back to userland.  */
658 	mutex_enter(&sc->sc_lock);
659 	KASSERT(sc->sc_busy);
660 	sc->sc_busy = false;
661 	mutex_exit(&sc->sc_lock);
662 }
663 
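/*
 * Entropy-source callback: remember how many bytes the pool wants and
 * kick the worker, unless a request is already pending or the source
 * has been disabled.
 */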
664 static void
665 tpm_rng_get(size_t nbytes, void *cookie)
666 {
667 	struct tpm_softc *sc = cookie;
668 
669 	if (atomic_load_relaxed(&sc->sc_rnddisabled))
670 		return;		/* tough */
671 	if (atomic_swap_uint(&sc->sc_rndpending, MIN(nbytes, UINT_MAX/NBBY))
672 	    == 0)
673 		workqueue_enqueue(sc->sc_rndwq, &sc->sc_rndwk, NULL);
674 }
675 
676 static int
677 tpm_tis12_init(struct tpm_softc *sc)
678 {
679 	int rv;
680 
681 	sc->sc_caps = bus_space_read_4(sc->sc_bt, sc->sc_bh,
682 	    TPM_INTF_CAPABILITY);
683 	sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID);
684 	sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV);
685 
686 	aprint_normal_dev(sc->sc_dev, "device 0x%08x rev 0x%x\n",
687 	    sc->sc_devid, sc->sc_rev);
688 
689 	if ((rv = tpm12_request_locality(sc, 0)) != 0)
690 		return rv;
691 
692 	/* Abort whatever it thought it was doing. */
693 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
694 
695 	/* XXX Run this at higher priority?  */
696 	if ((rv = workqueue_create(&sc->sc_rndwq, device_xname(sc->sc_dev),
697 		    tpm_rng_work, sc, PRI_NONE, IPL_VM, WQ_MPSAFE)) != 0)
698 		return rv;
699 	rndsource_setcb(&sc->sc_rnd, tpm_rng_get, sc);
700 	rnd_attach_source(&sc->sc_rnd, device_xname(sc->sc_dev),
701 	    RND_TYPE_RNG,
702 	    RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE|RND_FLAG_HASCB);
703 
704 	return 0;
705 }
706 
707 static int
708 tpm_tis12_start(struct tpm_softc *sc, int rw)
709 {
710 	int rv;
711 
712 	if (rw == UIO_READ) {
713 		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
714 		    TPM_READ_TMO, sc->sc_intf->read);
715 		return rv;
716 	}
717 
718 	/* Request the 0th locality. */
719 	if ((rv = tpm12_request_locality(sc, 0)) != 0)
720 		return rv;
721 
722 	sc->sc_status = tpm_status(sc);
723 	if (sc->sc_status & TPM_STS_CMD_READY)
724 		return 0;
725 
726 	/* Abort previous and restart. */
727 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY);
728 	rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO, sc->sc_intf->write);
729 	if (rv)
730 		return rv;
731 
732 	return 0;
733 }
734 
735 static int
736 tpm_tis12_read(struct tpm_softc *sc, void *buf, size_t len, size_t *count,
737     int flags)
738 {
739 	uint8_t *p = buf;
740 	size_t cnt;
741 	int rv, n;
742 
743 	cnt = 0;
744 	while (len > 0) {
745 		rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
746 		    TPM_READ_TMO, sc->sc_intf->read);
747 		if (rv)
748 			return rv;
749 
750 		n = MIN(len, tpm_getburst(sc));
751 		while (n > 0) {
752 			*p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_DATA);
753 			cnt++;
754 			len--;
755 			n--;
756 		}
757 
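		/*
		 * Without TPM_PARAM_SIZE we are reading a response header:
		 * after the 6 bytes of tag and length the caller can work
		 * out the total size, so stop here.
		 */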
758 		if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6)
759 			break;
760 	}
761 
762 	if (count)
763 		*count = cnt;
764 
765 	return 0;
766 }
767 
768 static int
769 tpm_tis12_write(struct tpm_softc *sc, const void *buf, size_t len)
770 {
771 	const uint8_t *p = buf;
772 	size_t cnt;
773 	int rv, r;
774 
775 	if (len == 0)
776 		return 0;
777 	if ((rv = tpm12_request_locality(sc, 0)) != 0)
778 		return rv;
779 
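	/*
	 * Feed the command one burst at a time, holding back the last
	 * byte.  The TPM must keep asserting TPM_STS_DATA_EXPECT until
	 * that final byte has been written.
	 */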
780 	cnt = 0;
781 	while (cnt < len - 1) {
782 		for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) {
783 			bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
784 			cnt++;
785 		}
786 		if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
787 			return rv;
788 		}
789 		sc->sc_status = tpm_status(sc);
790 		if (!(sc->sc_status & TPM_STS_DATA_EXPECT)) {
791 			return EIO;
792 		}
793 	}
794 
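	/* Write the final byte; the TPM should now clear DATA_EXPECT. */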
795 	bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++);
796 	cnt++;
797 
798 	if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) {
799 		return rv;
800 	}
801 	if ((sc->sc_status & TPM_STS_DATA_EXPECT) != 0) {
802 		return EIO;
803 	}
804 
805 	return 0;
806 }
807 
808 static int
809 tpm_tis12_end(struct tpm_softc *sc, int rw, int err)
810 {
811 	int rv = 0;
812 
813 	if (rw == UIO_READ) {
814 		rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc->sc_intf->read);
815 		if (rv)
816 			goto out;
817 
818 		/* Still more data? */
819 		sc->sc_status = tpm_status(sc);
820 		if (!err && (sc->sc_status & TPM_STS_DATA_AVAIL)) {
821 			rv = EIO;
822 		}
823 
824 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
825 		    TPM_STS_CMD_READY);
826 
827 		/* Release the 0th locality. */
828 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS,
829 		    TPM_ACCESS_ACTIVE_LOCALITY);
830 	} else {
831 		/* Hungry for more? */
832 		sc->sc_status = tpm_status(sc);
833 		if (!err && (sc->sc_status & TPM_STS_DATA_EXPECT)) {
834 			rv = EIO;
835 		}
836 
837 		bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS,
838 		    err ? TPM_STS_CMD_READY : TPM_STS_GO);
839 	}
840 
841 out:	return err ? err : rv;
842 }
843 
844 const struct tpm_intf tpm_intf_tis12 = {
845 	.version = TIS_1_2,
846 	.probe = tpm_tis12_probe,
847 	.init = tpm_tis12_init,
848 	.start = tpm_tis12_start,
849 	.read = tpm_tis12_read,
850 	.write = tpm_tis12_write,
851 	.end = tpm_tis12_end
852 };
853 
854 /* -------------------------------------------------------------------------- */
855 
856 static dev_type_open(tpmopen);
857 static dev_type_close(tpmclose);
858 static dev_type_read(tpmread);
859 static dev_type_write(tpmwrite);
860 static dev_type_ioctl(tpmioctl);
861 
862 const struct cdevsw tpm_cdevsw = {
863 	.d_open = tpmopen,
864 	.d_close = tpmclose,
865 	.d_read = tpmread,
866 	.d_write = tpmwrite,
867 	.d_ioctl = tpmioctl,
868 	.d_stop = nostop,
869 	.d_tty = notty,
870 	.d_poll = nopoll,
871 	.d_mmap = nommap,
872 	.d_kqfilter = nokqfilter,
873 	.d_discard = nodiscard,
874 	.d_flag = D_OTHER | D_MPSAFE,
875 };
876 
877 static int
878 tpmopen(dev_t dev, int flag, int mode, struct lwp *l)
879 {
880 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
881 	int ret = 0;
882 
883 	if (sc == NULL)
884 		return ENXIO;
885 
886 	mutex_enter(&sc->sc_lock);
887 	if (sc->sc_busy) {
888 		ret = EBUSY;
889 	} else {
890 		sc->sc_busy = true;
891 	}
892 	mutex_exit(&sc->sc_lock);
893 
894 	return ret;
895 }
896 
897 static int
898 tpmclose(dev_t dev, int flag, int mode, struct lwp *l)
899 {
900 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
901 	int ret = 0;
902 
903 	if (sc == NULL)
904 		return ENXIO;
905 
906 	mutex_enter(&sc->sc_lock);
907 	if (!sc->sc_busy) {
908 		ret = EINVAL;
909 	} else {
910 		sc->sc_busy = false;
911 	}
912 	mutex_exit(&sc->sc_lock);
913 
914 	return ret;
915 }
916 
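/*
 * Read one complete response from the TPM: fetch the header first to
 * learn the total length, then stream the remaining payload to
 * userland in TPM_BUFSIZ chunks.
 */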
917 static int
918 tpmread(dev_t dev, struct uio *uio, int flags)
919 {
920 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
921 	struct tpm_header hdr;
922 	uint8_t buf[TPM_BUFSIZ];
923 	size_t cnt, len, n;
924 	int rv;
925 
926 	if (sc == NULL)
927 		return ENXIO;
928 
929 	if ((rv = (*sc->sc_intf->start)(sc, UIO_READ)))
930 		return rv;
931 
932 	/* Get the header. */
933 	if ((rv = (*sc->sc_intf->read)(sc, &hdr, sizeof(hdr), &cnt, 0))) {
934 		goto out;
935 	}
936 	len = TPM_BE32(hdr.length);
937 	if (len > uio->uio_resid || len < cnt) {
938 		rv = EIO;
939 		goto out;
940 	}
941 
942 	/* Copy out the header. */
943 	if ((rv = uiomove(&hdr, cnt, uio))) {
944 		goto out;
945 	}
946 
947 	/* Process the rest. */
948 	len -= cnt;
949 	while (len > 0) {
950 		n = MIN(sizeof(buf), len);
951 		if ((rv = (*sc->sc_intf->read)(sc, buf, n, NULL, TPM_PARAM_SIZE))) {
952 			goto out;
953 		}
954 		if ((rv = uiomove(buf, n, uio))) {
955 			goto out;
956 		}
957 		len -= n;
958 	}
959 
960 out:
961 	rv = (*sc->sc_intf->end)(sc, UIO_READ, rv);
962 	return rv;
963 }
964 
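/*
 * Write one command to the TPM.  At most TPM_BUFSIZ bytes are taken
 * from userland per call, pushed to the device in a single write
 * transaction.
 */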
965 static int
966 tpmwrite(dev_t dev, struct uio *uio, int flags)
967 {
968 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
969 	uint8_t buf[TPM_BUFSIZ];
970 	int n, rv;
971 
972 	if (sc == NULL)
973 		return ENXIO;
974 
975 	n = MIN(sizeof(buf), uio->uio_resid);
976 	if ((rv = uiomove(buf, n, uio))) {
977 		goto out;
978 	}
979 	if ((rv = (*sc->sc_intf->start)(sc, UIO_WRITE))) {
980 		goto out;
981 	}
982 	if ((rv = (*sc->sc_intf->write)(sc, buf, n))) {
983 		goto out;
984 	}
985 
986 	rv = (*sc->sc_intf->end)(sc, UIO_WRITE, rv);
987 out:
988 	return rv;
989 }
990 
991 static int
992 tpmioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
993 {
994 	struct tpm_softc *sc = device_lookup_private(&tpm_cd, minor(dev));
995 	struct tpm_ioc_getinfo *info;
996 
997 	if (sc == NULL)
998 		return ENXIO;
999 
1000 	switch (cmd) {
1001 	case TPM_IOC_GETINFO:
1002 		info = addr;
1003 		info->api_version = TPM_API_VERSION;
1004 		info->tpm_version = sc->sc_ver;
1005 		info->itf_version = sc->sc_intf->version;
1006 		info->device_id = sc->sc_devid;
1007 		info->device_rev = sc->sc_rev;
1008 		info->device_caps = sc->sc_caps;
1009 		return 0;
1010 	default:
1011 		break;
1012 	}
1013 
1014 	return ENOTTY;
1015 }
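
/*
 * Illustrative sketch (not part of the driver) of how userland is
 * expected to use this device.  The device node name below is an
 * assumption for the example only; the ioctl and the open/write/read
 * sequence match the handlers above.
 *
 *	int fd = open("/dev/tpm0", O_RDWR);
 *	struct tpm_ioc_getinfo info;
 *	ioctl(fd, TPM_IOC_GETINFO, &info);	// version, device id, caps
 *	write(fd, cmd, cmdlen);			// one complete command blob
 *	read(fd, resp, sizeof(resp));		// the matching response
 *	close(fd);
 */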
1016