xref: /netbsd-src/sys/netbt/hci_link.c (revision daf6c4152fcddc27c445489775ed1f66ab4ea9a9)
1 /*	$NetBSD: hci_link.c,v 1.22 2010/10/14 07:05:03 plunky Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005 Iain Hibbert.
5  * Copyright (c) 2006 Itronix Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of Itronix Inc. may not be used to endorse
17  *    or promote products derived from this software without specific
18  *    prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
24  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  * ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: hci_link.c,v 1.22 2010/10/14 07:05:03 plunky Exp $");
35 
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/proc.h>
41 #include <sys/queue.h>
42 #include <sys/systm.h>
43 
44 #include <netbt/bluetooth.h>
45 #include <netbt/hci.h>
46 #include <netbt/l2cap.h>
47 #include <netbt/sco.h>
48 
49 /*******************************************************************************
50  *
51  *	HCI ACL Connections
52  */
53 
54 /*
55  * Automatically expire unused ACL connections after this number of
56  * seconds (if zero, do not expire unused connections) [sysctl]
57  */
58 int hci_acl_expiry = 10;	/* seconds */
59 
60 /*
61  * hci_acl_open(unit, bdaddr)
62  *
63  * open ACL connection to remote bdaddr. Only one ACL connection is permitted
64  * between any two Bluetooth devices, so we look for an existing one before
65  * trying to start a new one.
66  */
67 struct hci_link *
68 hci_acl_open(struct hci_unit *unit, bdaddr_t *bdaddr)
69 {
70 	struct hci_link *link;
71 	struct hci_memo *memo;
72 	hci_create_con_cp cp;
73 	int err;
74 
75 	KASSERT(unit != NULL);
76 	KASSERT(bdaddr != NULL);
77 
78 	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
79 	if (link == NULL) {
80 		link = hci_link_alloc(unit, bdaddr, HCI_LINK_ACL);
81 		if (link == NULL)
82 			return NULL;
83 	}
84 
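	/*
	 * What happens next depends on how far along the link already
	 * is: a closed link gets a Create_Connection command, a link
	 * still being set up is simply joined, and an open link has any
	 * pending expiry cancelled. In every case we take a reference.
	 */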
85 	switch(link->hl_state) {
86 	case HCI_LINK_CLOSED:
87 		/*
88 		 * open connection to remote device
89 		 */
90 		memset(&cp, 0, sizeof(cp));
91 		bdaddr_copy(&cp.bdaddr, bdaddr);
92 		cp.pkt_type = htole16(unit->hci_packet_type);
93 
94 		memo = hci_memo_find(unit, bdaddr);
95 		if (memo != NULL) {
96 			cp.page_scan_rep_mode = memo->page_scan_rep_mode;
97 			cp.page_scan_mode = memo->page_scan_mode;
98 			cp.clock_offset = memo->clock_offset;
99 		}
100 
101 		if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
102 			cp.accept_role_switch = 1;
103 
104 		err = hci_send_cmd(unit, HCI_CMD_CREATE_CON, &cp, sizeof(cp));
105 		if (err) {
106 			hci_link_free(link, err);
107 			return NULL;
108 		}
109 
110 		link->hl_flags |= HCI_LINK_CREATE_CON;
111 		link->hl_state = HCI_LINK_WAIT_CONNECT;
112 		break;
113 
114 	case HCI_LINK_WAIT_CONNECT:
115 	case HCI_LINK_WAIT_AUTH:
116 	case HCI_LINK_WAIT_ENCRYPT:
117 	case HCI_LINK_WAIT_SECURE:
118 		/*
119 		 * somebody else is already trying to connect, we just
120 		 * sit on the bench with them..
121 		 */
122 		break;
123 
124 	case HCI_LINK_OPEN:
125 		/*
126 		 * If already open, halt any expiry timeouts. We don't need
127 		 * to worry about a timeout that is already firing, since
128 		 * refcnt > 0 will keep the link alive.
129 		 */
130 		callout_stop(&link->hl_expire);
131 		break;
132 
133 	default:
134 		UNKNOWN(link->hl_state);
135 		return NULL;
136 	}
137 
138 	/* open: take a new reference on the link */
139 	link->hl_refcnt++;
140 
141 	return link;
142 }
143 
144 /*
145  * Close ACL connection. When there are no more references to this link,
146  * we can either close it down or schedule a delayed closedown.
147  */
148 void
149 hci_acl_close(struct hci_link *link, int err)
150 {
151 
152 	KASSERT(link != NULL);
153 
154 	if (--link->hl_refcnt == 0) {
155 		if (link->hl_state == HCI_LINK_CLOSED)
156 			hci_link_free(link, err);
157 		else if (hci_acl_expiry > 0)
158 			callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
159 	}
160 }
161 
162 /*
163  * Incoming ACL connection.
164  *
165  * Check the L2CAP listeners list and only accept when there is a
166  * potential listener available.
167  *
168  * There should not already be a link to the same bdaddr; we check
169  * anyway, though it's left unhandled for now.
170  */
171 struct hci_link *
172 hci_acl_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
173 {
174 	struct hci_link *link;
175 	struct l2cap_channel *chan;
176 
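	/*
	 * Accept only if some L2CAP socket is listening either on this
	 * unit's own address or on the wildcard (ANY) address.
	 */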
177 	LIST_FOREACH(chan, &l2cap_listen_list, lc_ncid) {
178 		if (bdaddr_same(&unit->hci_bdaddr, &chan->lc_laddr.bt_bdaddr)
179 		    || bdaddr_any(&chan->lc_laddr.bt_bdaddr))
180 			break;
181 	}
182 
183 	if (chan == NULL) {
184 		DPRINTF("%s: rejecting connection (no listeners)\n",
185 		    device_xname(unit->hci_dev));
186 
187 		return NULL;
188 	}
189 
190 	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
191 	if (link != NULL) {
192 		DPRINTF("%s: rejecting connection (link exists)\n",
193 		    device_xname(unit->hci_dev));
194 
195 		return NULL;
196 	}
197 
198 	link = hci_link_alloc(unit, bdaddr, HCI_LINK_ACL);
199 	if (link != NULL) {
200 		link->hl_state = HCI_LINK_WAIT_CONNECT;
201 
202 		if (hci_acl_expiry > 0)
203 			callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
204 	}
205 
206 	return link;
207 }
208 
209 void
210 hci_acl_timeout(void *arg)
211 {
212 	struct hci_link *link = arg;
213 	hci_discon_cp cp;
214 	int err;
215 
216 	mutex_enter(bt_lock);
217 	callout_ack(&link->hl_expire);
218 
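	/*
	 * If the link picked up new references after the expiry timer
	 * was scheduled, leave it alone; otherwise tear the connection
	 * down according to how far it got.
	 */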
219 	if (link->hl_refcnt > 0)
220 		goto out;
221 
222 	DPRINTF("link #%d expired\n", link->hl_handle);
223 
224 	switch (link->hl_state) {
225 	case HCI_LINK_CLOSED:
226 	case HCI_LINK_WAIT_CONNECT:
227 		hci_link_free(link, ECONNRESET);
228 		break;
229 
230 	case HCI_LINK_WAIT_AUTH:
231 	case HCI_LINK_WAIT_ENCRYPT:
232 	case HCI_LINK_WAIT_SECURE:
233 	case HCI_LINK_OPEN:
234 		cp.con_handle = htole16(link->hl_handle);
235 		cp.reason = 0x13; /* "Remote User Terminated Connection" */
236 
237 		err = hci_send_cmd(link->hl_unit, HCI_CMD_DISCONNECT,
238 					&cp, sizeof(cp));
239 
240 		if (err) {
241 			DPRINTF("error %d sending HCI_CMD_DISCONNECT\n",
242 			    err);
243 		}
244 
245 		break;
246 
247 	default:
248 		UNKNOWN(link->hl_state);
249 		break;
250 	}
251 
252 out:
253 	mutex_exit(bt_lock);
254 }
255 
256 /*
257  * Initiate any Link Mode change requests.
258  */
259 int
260 hci_acl_setmode(struct hci_link *link)
261 {
262 	int err;
263 
264 	KASSERT(link != NULL);
265 	KASSERT(link->hl_unit != NULL);
266 
267 	if (link->hl_state != HCI_LINK_OPEN)
268 		return EINPROGRESS;
269 
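	/*
	 * Issue at most one mode change request at a time, in order:
	 * authentication, then encryption, then link key change for
	 * secure mode. EINPROGRESS is returned while a request is
	 * outstanding; zero means all requested modes are satisfied.
	 */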
270 	if ((link->hl_flags & HCI_LINK_AUTH_REQ)
271 	    && !(link->hl_flags & HCI_LINK_AUTH)) {
272 		hci_auth_req_cp cp;
273 
274 		DPRINTF("requesting auth for handle #%d\n",
275 			link->hl_handle);
276 
277 		link->hl_state = HCI_LINK_WAIT_AUTH;
278 		cp.con_handle = htole16(link->hl_handle);
279 		err = hci_send_cmd(link->hl_unit, HCI_CMD_AUTH_REQ,
280 				   &cp, sizeof(cp));
281 
282 		return (err == 0 ? EINPROGRESS : err);
283 	}
284 
285 	if ((link->hl_flags & HCI_LINK_ENCRYPT_REQ)
286 	    && !(link->hl_flags & HCI_LINK_ENCRYPT)) {
287 		hci_set_con_encryption_cp cp;
288 
289 		/* XXX we should check features for encryption capability */
290 
291 		DPRINTF("requesting encryption for handle #%d\n",
292 			link->hl_handle);
293 
294 		link->hl_state = HCI_LINK_WAIT_ENCRYPT;
295 		cp.con_handle = htole16(link->hl_handle);
296 		cp.encryption_enable = 0x01;
297 
298 		err = hci_send_cmd(link->hl_unit, HCI_CMD_SET_CON_ENCRYPTION,
299 				   &cp, sizeof(cp));
300 
301 		return (err == 0 ? EINPROGRESS : err);
302 	}
303 
304 	if ((link->hl_flags & HCI_LINK_SECURE_REQ)) {
305 		hci_change_con_link_key_cp cp;
306 
307 		/* always change link key for SECURE requests */
308 		link->hl_flags &= ~HCI_LINK_SECURE;
309 
310 		DPRINTF("changing link key for handle #%d\n",
311 			link->hl_handle);
312 
313 		link->hl_state = HCI_LINK_WAIT_SECURE;
314 		cp.con_handle = htole16(link->hl_handle);
315 
316 		err = hci_send_cmd(link->hl_unit, HCI_CMD_CHANGE_CON_LINK_KEY,
317 				   &cp, sizeof(cp));
318 
319 		return (err == 0 ? EINPROGRESS : err);
320 	}
321 
322 	return 0;
323 }
324 
325 /*
326  * Link Mode changed.
327  *
328  * This is called from event handlers when the mode change
329  * is complete. We notify upstream and restart the link.
330  */
331 void
332 hci_acl_linkmode(struct hci_link *link)
333 {
334 	struct l2cap_channel *chan, *next;
335 	int err, mode = 0;
336 
337 	DPRINTF("handle #%d, auth %s, encrypt %s, secure %s\n",
338 		link->hl_handle,
339 		(link->hl_flags & HCI_LINK_AUTH ? "on" : "off"),
340 		(link->hl_flags & HCI_LINK_ENCRYPT ? "on" : "off"),
341 		(link->hl_flags & HCI_LINK_SECURE ? "on" : "off"));
342 
343 	if (link->hl_flags & HCI_LINK_AUTH)
344 		mode |= L2CAP_LM_AUTH;
345 
346 	if (link->hl_flags & HCI_LINK_ENCRYPT)
347 		mode |= L2CAP_LM_ENCRYPT;
348 
349 	if (link->hl_flags & HCI_LINK_SECURE)
350 		mode |= L2CAP_LM_SECURE;
351 
352 	/*
353 	 * The link state will only be OPEN here if the mode change
354 	 * was successful. So, we can proceed with L2CAP connections,
355 	 * or notify already established channels, to allow any that
356 	 * are dissatisfied to disconnect before we restart.
357 	 */
358 	next = LIST_FIRST(&l2cap_active_list);
359 	while ((chan = next) != NULL) {
360 		next = LIST_NEXT(chan, lc_ncid);
361 
362 		if (chan->lc_link != link)
363 			continue;
364 
365 		switch(chan->lc_state) {
366 		case L2CAP_WAIT_SEND_CONNECT_REQ: /* we are connecting */
367 			if ((mode & chan->lc_mode) != chan->lc_mode) {
368 				l2cap_close(chan, ECONNABORTED);
369 				break;
370 			}
371 
372 			chan->lc_state = L2CAP_WAIT_RECV_CONNECT_RSP;
373 			err = l2cap_send_connect_req(chan);
374 			if (err) {
375 				l2cap_close(chan, err);
376 				break;
377 			}
378 			break;
379 
380 		case L2CAP_WAIT_SEND_CONNECT_RSP: /* they are connecting */
381 			if ((mode & chan->lc_mode) != chan->lc_mode) {
382 				l2cap_send_connect_rsp(link, chan->lc_ident,
383 							0, chan->lc_rcid,
384 							L2CAP_SECURITY_BLOCK);
385 
386 				l2cap_close(chan, ECONNABORTED);
387 				break;
388 			}
389 
390 			l2cap_send_connect_rsp(link, chan->lc_ident,
391 						chan->lc_lcid, chan->lc_rcid,
392 						L2CAP_SUCCESS);
393 
394 			chan->lc_state = L2CAP_WAIT_CONFIG;
395 			chan->lc_flags |= (L2CAP_WAIT_CONFIG_RSP | L2CAP_WAIT_CONFIG_REQ);
396 			err = l2cap_send_config_req(chan);
397 			if (err) {
398 				l2cap_close(chan, err);
399 				break;
400 			}
401 			break;
402 
403 		case L2CAP_WAIT_RECV_CONNECT_RSP:
404 		case L2CAP_WAIT_CONFIG:
405 		case L2CAP_OPEN: /* already established */
406 			(*chan->lc_proto->linkmode)(chan->lc_upper, mode);
407 			break;
408 
409 		default:
410 			break;
411 		}
412 	}
413 
414 	link->hl_state = HCI_LINK_OPEN;
415 	hci_acl_start(link);
416 }
417 
418 /*
419  * Receive ACL Data
420  *
421  * We accumulate packet fragments on the hci_link structure
422  * until a full L2CAP frame is ready, then send it on.
423  */
424 void
425 hci_acl_recv(struct mbuf *m, struct hci_unit *unit)
426 {
427 	struct hci_link *link;
428 	hci_acldata_hdr_t hdr;
429 	uint16_t handle, want;
430 	int pb, got;
431 
432 	KASSERT(m != NULL);
433 	KASSERT(unit != NULL);
434 
435 	KASSERT(m->m_pkthdr.len >= sizeof(hdr));
436 	m_copydata(m, 0, sizeof(hdr), &hdr);
437 	m_adj(m, sizeof(hdr));
438 
439 #ifdef DIAGNOSTIC
440 	if (hdr.type != HCI_ACL_DATA_PKT) {
441 		aprint_error_dev(unit->hci_dev, "bad ACL packet type\n");
442 		goto bad;
443 	}
444 
445 	if (m->m_pkthdr.len != le16toh(hdr.length)) {
446 		aprint_error_dev(unit->hci_dev,
447 		    "bad ACL packet length (%d != %d)\n",
448 		    m->m_pkthdr.len, le16toh(hdr.length));
449 		goto bad;
450 	}
451 #endif
452 
453 	hdr.length = le16toh(hdr.length);
454 	hdr.con_handle = le16toh(hdr.con_handle);
455 	handle = HCI_CON_HANDLE(hdr.con_handle);
456 	pb = HCI_PB_FLAG(hdr.con_handle);
457 
458 	link = hci_link_lookup_handle(unit, handle);
459 	if (link == NULL) {
460 		hci_discon_cp cp;
461 
462 		DPRINTF("%s: dumping packet for unknown handle #%d\n",
463 			device_xname(unit->hci_dev), handle);
464 
465 		/*
466 		 * There is no way to find out what this connection handle is
467 		 * for, so just get rid of it. This may happen if a USB dongle
468 		 * is plugged into a self-powered hub and does not reset when
469 		 * the system is shut down.
470 		 *
471 		 * This can cause a problem with some Broadcom controllers
472 		 * which emit empty ACL packets during connection setup, so
473 		 * only disconnect where data is present.
474 		 */
475 		if (hdr.length > 0) {
476 			cp.con_handle = htole16(handle);
477 			cp.reason = 0x13; /* "Remote User Terminated Connection" */
478 			hci_send_cmd(unit, HCI_CMD_DISCONNECT, &cp, sizeof(cp));
479 		}
480 		goto bad;
481 	}
482 
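	/*
	 * The packet boundary flag tells us whether this is the start
	 * of a (possibly fragmented) L2CAP frame, or a continuation of
	 * the frame we are reassembling on link->hl_rxp.
	 */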
483 	switch (pb) {
484 	case HCI_PACKET_START:
485 		if (link->hl_rxp != NULL)
486 			aprint_error_dev(unit->hci_dev,
487 			    "dropped incomplete ACL packet\n");
488 
489 		if (m->m_pkthdr.len < sizeof(l2cap_hdr_t)) {
490 			aprint_error_dev(unit->hci_dev, "short ACL packet\n");
491 			goto bad;
492 		}
493 
494 		link->hl_rxp = m;
495 		got = m->m_pkthdr.len;
496 		break;
497 
498 	case HCI_PACKET_FRAGMENT:
499 		if (link->hl_rxp == NULL) {
500 			aprint_error_dev(unit->hci_dev,
501 			    "unexpected packet fragment\n");
502 
503 			goto bad;
504 		}
505 
506 		got = m->m_pkthdr.len + link->hl_rxp->m_pkthdr.len;
507 		m_cat(link->hl_rxp, m);
508 		m = link->hl_rxp;
509 		m->m_pkthdr.len = got;
510 		break;
511 
512 	default:
513 		aprint_error_dev(unit->hci_dev, "unknown packet type\n");
514 		goto bad;
515 	}
516 
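	/*
	 * The first two octets of an L2CAP frame carry the payload
	 * length. Add the basic header size and subtract what has been
	 * accumulated so far to find out how much is still outstanding;
	 * the frame is only passed up when nothing more is expected.
	 */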
517 	m_copydata(m, 0, sizeof(want), &want);
518 	want = le16toh(want) + sizeof(l2cap_hdr_t) - got;
519 
520 	if (want > 0)
521 		return;
522 
523 	link->hl_rxp = NULL;
524 
525 	if (want == 0) {
526 		l2cap_recv_frame(m, link);
527 		return;
528 	}
529 
530 bad:
531 	m_freem(m);
532 }
533 
534 /*
535  * Send ACL data on link
536  *
537  * We must fragment packets into chunks of at most unit->hci_max_acl_size and
538  * prepend a relevant ACL header to each fragment. We keep a PDU structure
539  * attached to the link, so that completed fragments can be marked off and
540  * more data requested from above once the PDU is sent.
541  */
542 int
543 hci_acl_send(struct mbuf *m, struct hci_link *link,
544 		struct l2cap_channel *chan)
545 {
546 	struct l2cap_pdu *pdu;
547 	struct mbuf *n = NULL;
548 	int plen, mlen, num = 0;
549 
550 	KASSERT(link != NULL);
551 	KASSERT(m != NULL);
552 	KASSERT(m->m_flags & M_PKTHDR);
553 	KASSERT(m->m_pkthdr.len > 0);
554 
555 	if (link->hl_state == HCI_LINK_CLOSED) {
556 		m_freem(m);
557 		return ENETDOWN;
558 	}
559 
560 	pdu = pool_get(&l2cap_pdu_pool, PR_NOWAIT);
561 	if (pdu == NULL)
562 		goto nomem;
563 
564 	pdu->lp_chan = chan;
565 	pdu->lp_pending = 0;
566 	MBUFQ_INIT(&pdu->lp_data);
567 
568 	plen = m->m_pkthdr.len;
569 	mlen = link->hl_unit->hci_max_acl_size;
570 
571 	DPRINTFN(5, "%s: handle #%d, plen = %d, max = %d\n",
572 		device_xname(link->hl_unit->hci_dev), link->hl_handle, plen, mlen);
573 
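	/*
	 * Split the L2CAP packet into fragments no larger than the
	 * controller's maximum ACL payload size, queueing them on the
	 * PDU. The first fragment is tagged with M_PROTO1 so that
	 * hci_acl_start() can emit it with the HCI_PACKET_START flag.
	 */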
574 	while (plen > 0) {
575 		if (plen > mlen) {
576 			n = m_split(m, mlen, M_DONTWAIT);
577 			if (n == NULL)
578 				goto nomem;
579 		} else {
580 			mlen = plen;
581 		}
582 
583 		if (num++ == 0)
584 			m->m_flags |= M_PROTO1;	/* tag first fragment */
585 
586 		DPRINTFN(10, "chunk of %d (plen = %d) bytes\n", mlen, plen);
587 		MBUFQ_ENQUEUE(&pdu->lp_data, m);
588 		m = n;
589 		plen -= mlen;
590 	}
591 
592 	TAILQ_INSERT_TAIL(&link->hl_txq, pdu, lp_next);
593 	link->hl_txqlen += num;
594 
595 	hci_acl_start(link);
596 
597 	return 0;
598 
599 nomem:
600 	if (m) m_freem(m);
601 	if (pdu) {
602 		MBUFQ_DRAIN(&pdu->lp_data);
603 		pool_put(&l2cap_pdu_pool, pdu);
604 	}
605 
606 	return ENOMEM;
607 }
608 
609 /*
610  * Start sending ACL data on link.
611  *
612  *	This is called when the queue may need restarting: as new data
613  * is queued, after link mode changes have completed, or when device
614  * buffers have cleared.
615  *
616  *	We may use all the available packet slots. The reason that we add
617  * the ACL encapsulation here rather than in hci_acl_send() is that L2CAP
618  * signal packets may be queued before the handle is given to us..
619  */
620 void
621 hci_acl_start(struct hci_link *link)
622 {
623 	struct hci_unit *unit;
624 	hci_acldata_hdr_t *hdr;
625 	struct l2cap_pdu *pdu;
626 	struct mbuf *m;
627 	uint16_t handle;
628 
629 	KASSERT(link != NULL);
630 
631 	unit = link->hl_unit;
632 	KASSERT(unit != NULL);
633 
634 	/* this mainly blocks re-entry via l2cap_start() (see HCI_LINK_BLOCK below) */
635 	if (link->hl_state != HCI_LINK_OPEN)
636 		return;
637 
638 	if (link->hl_txqlen == 0 || unit->hci_num_acl_pkts == 0)
639 		return;
640 
641 	/* find first PDU with data to send */
642 	pdu = TAILQ_FIRST(&link->hl_txq);
643 	for (;;) {
644 		if (pdu == NULL)
645 			return;
646 
647 		if (MBUFQ_FIRST(&pdu->lp_data) != NULL)
648 			break;
649 
650 		pdu = TAILQ_NEXT(pdu, lp_next);
651 	}
652 
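	/*
	 * Send fragments while the controller still advertises free
	 * buffer slots. Each fragment is prepended with an ACL data
	 * header carrying the connection handle and a packet boundary
	 * flag: START for a fragment tagged M_PROTO1, FRAGMENT for the
	 * rest.
	 */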
653 	while (unit->hci_num_acl_pkts > 0) {
654 		MBUFQ_DEQUEUE(&pdu->lp_data, m);
655 		KASSERT(m != NULL);
656 
657 		if (m->m_flags & M_PROTO1)
658 			handle = HCI_MK_CON_HANDLE(link->hl_handle,
659 						HCI_PACKET_START, 0);
660 		else
661 			handle = HCI_MK_CON_HANDLE(link->hl_handle,
662 						HCI_PACKET_FRAGMENT, 0);
663 
664 		M_PREPEND(m, sizeof(*hdr), M_DONTWAIT);
665 		if (m == NULL)
666 			break;
667 
668 		hdr = mtod(m, hci_acldata_hdr_t *);
669 		hdr->type = HCI_ACL_DATA_PKT;
670 		hdr->con_handle = htole16(handle);
671 		hdr->length = htole16(m->m_pkthdr.len - sizeof(*hdr));
672 
673 		link->hl_txqlen--;
674 		pdu->lp_pending++;
675 
676 		hci_output_acl(unit, m);
677 
678 		if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
679 			if (pdu->lp_chan) {
680 				/*
681 				 * This should enable streaming of PDUs - when
682 				 * we have placed all the fragments on the acl
683 				 * output queue, we trigger the L2CAP layer to
684 				 * send us down one more. Use a false state so
685 				 * we don't run into ourselves coming back from
686 				 * the future..
687 				 */
688 				link->hl_state = HCI_LINK_BLOCK;
689 				l2cap_start(pdu->lp_chan);
690 				link->hl_state = HCI_LINK_OPEN;
691 			}
692 
693 			pdu = TAILQ_NEXT(pdu, lp_next);
694 			if (pdu == NULL)
695 				break;
696 		}
697 	}
698 
699 	/*
700 	 * We have had our turn now; move to the back of the queue to let
701 	 * other links have a go at the output buffers..
702 	 */
703 	if (TAILQ_NEXT(link, hl_next)) {
704 		TAILQ_REMOVE(&unit->hci_links, link, hl_next);
705 		TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
706 	}
707 }
708 
709 /*
710  * Confirm ACL packets cleared from Controller buffers. We scan our PDU
711  * list to clear pending fragments and signal upstream for more data
712  * when a PDU is complete.
713  */
714 void
715 hci_acl_complete(struct hci_link *link, int num)
716 {
717 	struct l2cap_pdu *pdu;
718 	struct l2cap_channel *chan;
719 
720 	DPRINTFN(5, "handle #%d (%d)\n", link->hl_handle, num);
721 
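	/*
	 * Credit the completed fragments against the PDUs at the head
	 * of the transmit queue. A PDU with no pending fragments and no
	 * data left to send is finished: release it and notify the
	 * channel so more data can be queued.
	 */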
722 	while (num > 0) {
723 		pdu = TAILQ_FIRST(&link->hl_txq);
724 		if (pdu == NULL) {
725 			aprint_error_dev(link->hl_unit->hci_dev,
726 			    "%d packets completed on handle #%x but none pending!\n",
727 			    num, link->hl_handle);
728 
729 			return;
730 		}
731 
732 		if (num >= pdu->lp_pending) {
733 			num -= pdu->lp_pending;
734 			pdu->lp_pending = 0;
735 
736 			if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
737 				TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
738 				chan = pdu->lp_chan;
739 				if (chan != NULL) {
740 					chan->lc_pending--;
741 					(*chan->lc_proto->complete)
742 							(chan->lc_upper, 1);
743 
744 					if (chan->lc_pending == 0)
745 						l2cap_start(chan);
746 				}
747 
748 				pool_put(&l2cap_pdu_pool, pdu);
749 			}
750 		} else {
751 			pdu->lp_pending -= num;
752 			num = 0;
753 		}
754 	}
755 }
756 
757 /*******************************************************************************
758  *
759  *	HCI SCO Connections
760  */
761 
762 /*
763  * Incoming SCO Connection. We check the list for anybody willing
764  * to take it.
765  */
766 struct hci_link *
767 hci_sco_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
768 {
769 	struct sockaddr_bt laddr, raddr;
770 	struct sco_pcb *pcb, *new;
771 	struct hci_link *sco, *acl;
772 
773 	memset(&laddr, 0, sizeof(laddr));
774 	laddr.bt_len = sizeof(laddr);
775 	laddr.bt_family = AF_BLUETOOTH;
776 	bdaddr_copy(&laddr.bt_bdaddr, &unit->hci_bdaddr);
777 
778 	memset(&raddr, 0, sizeof(raddr));
779 	raddr.bt_len = sizeof(raddr);
780 	raddr.bt_family = AF_BLUETOOTH;
781 	bdaddr_copy(&raddr.bt_bdaddr, bdaddr);
782 
783 	/*
784 	 * There should already be an ACL link up and running before
785 	 * the controller sends us SCO connection requests, but you
786 	 * never know..
787 	 */
788 	acl = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
789 	if (acl == NULL || acl->hl_state != HCI_LINK_OPEN)
790 		return NULL;
791 
792 	LIST_FOREACH(pcb, &sco_pcb, sp_next) {
793 		if ((pcb->sp_flags & SP_LISTENING) == 0)
794 			continue;
795 
796 		new = (*pcb->sp_proto->newconn)(pcb->sp_upper, &laddr, &raddr);
797 		if (new == NULL)
798 			continue;
799 
800 		/*
801 		 * Ok, got new pcb so we can start a new link and fill
802 		 * in all the details.
803 		 */
804 		bdaddr_copy(&new->sp_laddr, &unit->hci_bdaddr);
805 		bdaddr_copy(&new->sp_raddr, bdaddr);
806 
807 		sco = hci_link_alloc(unit, bdaddr, HCI_LINK_SCO);
808 		if (sco == NULL) {
809 			sco_detach(&new);
810 			return NULL;
811 		}
812 
813 		sco->hl_link = hci_acl_open(unit, bdaddr);
814 		KASSERT(sco->hl_link == acl);
815 
816 		sco->hl_sco = new;
817 		new->sp_link = sco;
818 
819 		new->sp_mtu = unit->hci_max_sco_size;
820 		return sco;
821 	}
822 
823 	return NULL;
824 }
825 
826 /*
827  * Receive SCO packet; we only need to strip the header and send
828  * it to the right handler.
829  */
830 void
831 hci_sco_recv(struct mbuf *m, struct hci_unit *unit)
832 {
833 	struct hci_link *link;
834 	hci_scodata_hdr_t hdr;
835 	uint16_t handle;
836 
837 	KASSERT(m != NULL);
838 	KASSERT(unit != NULL);
839 
840 	KASSERT(m->m_pkthdr.len >= sizeof(hdr));
841 	m_copydata(m, 0, sizeof(hdr), &hdr);
842 	m_adj(m, sizeof(hdr));
843 
844 #ifdef DIAGNOSTIC
845 	if (hdr.type != HCI_SCO_DATA_PKT) {
846 		aprint_error_dev(unit->hci_dev, "bad SCO packet type\n");
847 		goto bad;
848 	}
849 
850 	if (m->m_pkthdr.len != hdr.length) {
851 		aprint_error_dev(unit->hci_dev,
852 		    "bad SCO packet length (%d != %d)\n",
853 		    m->m_pkthdr.len, hdr.length);
854 
855 		goto bad;
856 	}
857 #endif
858 
859 	hdr.con_handle = le16toh(hdr.con_handle);
860 	handle = HCI_CON_HANDLE(hdr.con_handle);
861 
862 	link = hci_link_lookup_handle(unit, handle);
863 	if (link == NULL || link->hl_type == HCI_LINK_ACL) {
864 		DPRINTF("%s: dumping packet for unknown handle #%d\n",
865 			device_xname(unit->hci_dev), handle);
866 
867 		goto bad;
868 	}
869 
870 	(*link->hl_sco->sp_proto->input)(link->hl_sco->sp_upper, m);
871 	return;
872 
873 bad:
874 	m_freem(m);
875 }
876 
877 void
878 hci_sco_start(struct hci_link *link)
879 {
880 }
881 
882 /*
883  * SCO packets have completed at the controller, so we can
884  * signal up to free the buffer space.
885  */
886 void
887 hci_sco_complete(struct hci_link *link, int num)
888 {
889 
890 	DPRINTFN(5, "handle #%d (num=%d)\n", link->hl_handle, num);
891 	link->hl_sco->sp_pending--;
892 	(*link->hl_sco->sp_proto->complete)(link->hl_sco->sp_upper, num);
893 }
894 
895 /*******************************************************************************
896  *
897  *	Generic HCI Connection alloc/free/lookup etc
898  */
899 
900 struct hci_link *
901 hci_link_alloc(struct hci_unit *unit, bdaddr_t *bdaddr, uint8_t type)
902 {
903 	struct hci_link *link;
904 
905 	KASSERT(unit != NULL);
906 
907 	link = malloc(sizeof(struct hci_link), M_BLUETOOTH, M_NOWAIT | M_ZERO);
908 	if (link == NULL)
909 		return NULL;
910 
911 	link->hl_unit = unit;
912 	link->hl_type = type;
913 	link->hl_state = HCI_LINK_CLOSED;
914 	bdaddr_copy(&link->hl_bdaddr, bdaddr);
915 
916 	/* init ACL portion */
917 	callout_init(&link->hl_expire, 0);
918 	callout_setfunc(&link->hl_expire, hci_acl_timeout, link);
919 
920 	TAILQ_INIT(&link->hl_txq);	/* outgoing packets */
921 	TAILQ_INIT(&link->hl_reqs);	/* request queue */
922 
923 	link->hl_mtu = L2CAP_MTU_DEFAULT;		/* L2CAP signal mtu */
924 	link->hl_flush = L2CAP_FLUSH_TIMO_DEFAULT;	/* flush timeout */
925 
926 	/* init SCO portion */
927 	MBUFQ_INIT(&link->hl_data);
928 
929 	/* attach to unit */
930 	TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
931 	return link;
932 }
933 
934 void
935 hci_link_free(struct hci_link *link, int err)
936 {
937 	struct l2cap_req *req;
938 	struct l2cap_pdu *pdu;
939 	struct l2cap_channel *chan, *next;
940 
941 	KASSERT(link != NULL);
942 
943 	DPRINTF("#%d, type = %d, state = %d, refcnt = %d\n",
944 		link->hl_handle, link->hl_type,
945 		link->hl_state, link->hl_refcnt);
946 
947 	/* ACL reference count */
948 	if (link->hl_refcnt > 0) {
949 		next = LIST_FIRST(&l2cap_active_list);
950 		while ((chan = next) != NULL) {
951 			next = LIST_NEXT(chan, lc_ncid);
952 			if (chan->lc_link == link)
953 				l2cap_close(chan, err);
954 		}
955 	}
956 	KASSERT(link->hl_refcnt == 0);
957 
958 	/* ACL L2CAP requests.. */
959 	while ((req = TAILQ_FIRST(&link->hl_reqs)) != NULL)
960 		l2cap_request_free(req);
961 
962 	KASSERT(TAILQ_EMPTY(&link->hl_reqs));
963 
964 	/* ACL outgoing data queue */
965 	while ((pdu = TAILQ_FIRST(&link->hl_txq)) != NULL) {
966 		TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
967 		MBUFQ_DRAIN(&pdu->lp_data);
968 		if (pdu->lp_pending)
969 			link->hl_unit->hci_num_acl_pkts += pdu->lp_pending;
970 
971 		pool_put(&l2cap_pdu_pool, pdu);
972 	}
973 
974 	KASSERT(TAILQ_EMPTY(&link->hl_txq));
975 
976 	/* ACL incoming data packet */
977 	if (link->hl_rxp != NULL) {
978 		m_freem(link->hl_rxp);
979 		link->hl_rxp = NULL;
980 	}
981 
982 	/* SCO master ACL link */
983 	if (link->hl_link != NULL) {
984 		hci_acl_close(link->hl_link, err);
985 		link->hl_link = NULL;
986 	}
987 
988 	/* SCO pcb */
989 	if (link->hl_sco != NULL) {
990 		struct sco_pcb *pcb;
991 
992 		pcb = link->hl_sco;
993 		pcb->sp_link = NULL;
994 		link->hl_sco = NULL;
995 		(*pcb->sp_proto->disconnected)(pcb->sp_upper, err);
996 	}
997 
998 	/* flush any SCO data */
999 	MBUFQ_DRAIN(&link->hl_data);
1000 
1001 	/*
1002 	 * Halt the callout - if it's already running we cannot free the
1003 	 * link structure, but the timeout function will call us back in
1004 	 * any case.
1005 	 */
1006 	link->hl_state = HCI_LINK_CLOSED;
1007 	callout_stop(&link->hl_expire);
1008 	if (callout_invoking(&link->hl_expire))
1009 		return;
1010 
1011 	callout_destroy(&link->hl_expire);
1012 
1013 	/*
1014 	 * If we made a note of clock offset, keep it in a memo
1015 	 * to facilitate reconnections to this device
1016 	 */
1017 	if (link->hl_clock != 0) {
1018 		struct hci_memo *memo;
1019 
1020 		memo = hci_memo_new(link->hl_unit, &link->hl_bdaddr);
1021 		if (memo != NULL)
1022 			memo->clock_offset = link->hl_clock;
1023 	}
1024 
1025 	TAILQ_REMOVE(&link->hl_unit->hci_links, link, hl_next);
1026 	free(link, M_BLUETOOTH);
1027 }
1028 
1029 /*
1030  * Look up an HCI link by address and type. Note that for SCO links there
1031  * may be more than one link per address, so we only return links with no
1032  * handle (i.e., new links).
1033  */
1034 struct hci_link *
1035 hci_link_lookup_bdaddr(struct hci_unit *unit, bdaddr_t *bdaddr, uint8_t type)
1036 {
1037 	struct hci_link *link;
1038 
1039 	KASSERT(unit != NULL);
1040 	KASSERT(bdaddr != NULL);
1041 
1042 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1043 		if (link->hl_type != type)
1044 			continue;
1045 
1046 		if (type == HCI_LINK_SCO && link->hl_handle != 0)
1047 			continue;
1048 
1049 		if (bdaddr_same(&link->hl_bdaddr, bdaddr))
1050 			break;
1051 	}
1052 
1053 	return link;
1054 }
1055 
1056 struct hci_link *
1057 hci_link_lookup_handle(struct hci_unit *unit, uint16_t handle)
1058 {
1059 	struct hci_link *link;
1060 
1061 	KASSERT(unit != NULL);
1062 
1063 	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
1064 		if (handle == link->hl_handle)
1065 			break;
1066 	}
1067 
1068 	return link;
1069 }
1070