/* $NetBSD: hci_link.c,v 1.7 2006/10/12 01:32:37 christos Exp $ */

/*-
 * Copyright (c) 2005 Iain Hibbert.
 * Copyright (c) 2006 Itronix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Itronix Inc. may not be used to endorse
 *    or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hci_link.c,v 1.7 2006/10/12 01:32:37 christos Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>

#include <netbt/bluetooth.h>
#include <netbt/hci.h>
#include <netbt/l2cap.h>
#include <netbt/sco.h>

/*******************************************************************************
 *
 * HCI ACL Connections
 */

/*
 * Automatically expire unused ACL connections after this number of
 * seconds (if zero, do not expire unused connections) [sysctl]
 */
int hci_acl_expiry = 10;        /* seconds */

/*
 * hci_acl_open(unit, bdaddr)
 *
 * open ACL connection to remote bdaddr. Only one ACL connection is permitted
 * between any two Bluetooth devices, so we look for an existing one before
 * trying to start a new one.
 */
struct hci_link *
hci_acl_open(struct hci_unit *unit, bdaddr_t *bdaddr)
{
        struct hci_link *link;
        struct hci_memo *memo;
        hci_create_con_cp cp;
        int err;

        KASSERT(unit);
        KASSERT(bdaddr);

        link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
        if (link == NULL) {
                link = hci_link_alloc(unit);
                if (link == NULL)
                        return NULL;

                link->hl_type = HCI_LINK_ACL;
                bdaddr_copy(&link->hl_bdaddr, bdaddr);
        }

        switch(link->hl_state) {
        case HCI_LINK_CLOSED:
                /*
                 * open connection to remote device
                 */
                memset(&cp, 0, sizeof(cp));
                bdaddr_copy(&cp.bdaddr, bdaddr);
                cp.pkt_type = htole16(unit->hci_packet_type);

                memo = hci_memo_find(unit, bdaddr);
                if (memo != NULL) {
                        cp.page_scan_rep_mode = memo->response.page_scan_rep_mode;
                        cp.page_scan_mode = memo->response.page_scan_mode;
                        cp.clock_offset = htole16(memo->response.clock_offset);
                }

                if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
                        cp.accept_role_switch = 1;

                err = hci_send_cmd(unit, HCI_CMD_CREATE_CON, &cp, sizeof(cp));
                if (err) {
                        hci_link_free(link, err);
                        return NULL;
                }

                link->hl_state = HCI_LINK_WAIT_CONNECT;
                break;

        case HCI_LINK_WAIT_CONNECT:
                /*
                 * somebody else is already trying to connect; we just
                 * sit on the bench with them..
                 */
                break;

        case HCI_LINK_OPEN:
                /*
                 * If already open, halt any expiry timeouts. We don't need
                 * to care about a timeout that is already invoking, since
                 * refcnt > 0 will keep the link alive.
                 */
                callout_stop(&link->hl_expire);
                break;

        default:
                UNKNOWN(link->hl_state);
                return NULL;
        }

        /* open */
        link->hl_refcnt++;

        return link;
}

/*
 * Close ACL connection. When there are no more references to this link,
 * we can either close it down or schedule a delayed closedown.
 */
void
hci_acl_close(struct hci_link *link, int err)
{

        KASSERT(link);

        if (--link->hl_refcnt == 0) {
                if (link->hl_state == HCI_LINK_CLOSED)
                        hci_link_free(link, err);
                else if (hci_acl_expiry > 0)
                        callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
        }
}

/*
 * Incoming ACL connection.
 *
 * For now, we accept all connections but it would be better to check
 * the L2CAP listen list and only accept when there is a listener
 * available.
 *
 * There should not be a link to the same bdaddr already; we check
 * anyway, though it's left unhandled for now.
 */
struct hci_link *
hci_acl_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
        struct hci_link *link;

        link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
        if (link != NULL)
                return NULL;

        link = hci_link_alloc(unit);
        if (link != NULL) {
                link->hl_state = HCI_LINK_WAIT_CONNECT;
                link->hl_type = HCI_LINK_ACL;
                bdaddr_copy(&link->hl_bdaddr, bdaddr);

                if (hci_acl_expiry > 0)
                        callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
        }

        return link;
}

void
hci_acl_timeout(void *arg)
{
        struct hci_link *link = arg;
        hci_discon_cp cp;
        int s, err;

        s = splsoftnet();
        callout_ack(&link->hl_expire);

        if (link->hl_refcnt > 0)
                goto out;

        DPRINTF("link #%d expired\n", link->hl_handle);

        switch (link->hl_state) {
        case HCI_LINK_CLOSED:
        case HCI_LINK_WAIT_CONNECT:
                hci_link_free(link, ECONNRESET);
                break;

        case HCI_LINK_OPEN:
                cp.con_handle = htole16(link->hl_handle);
                cp.reason = 0x13;       /* "Remote User Terminated Connection" */

                err = hci_send_cmd(link->hl_unit, HCI_CMD_DISCONNECT,
                                &cp, sizeof(cp));

                if (err) {
                        DPRINTF("error %d sending HCI_CMD_DISCONNECT\n",
                                err);
                }

                break;

        default:
                UNKNOWN(link->hl_state);
                break;
        }

out:
        splx(s);
}

/*
 * Receive ACL Data
 *
 * we accumulate packet fragments on the hci_link structure
 * until a full L2CAP frame is ready, then send it on.
 */
void
hci_acl_recv(struct mbuf *m, struct hci_unit *unit)
{
        struct hci_link *link;
        hci_acldata_hdr_t hdr;
        uint16_t handle, want;
        int pb, got;

        KASSERT(m);
        KASSERT(unit);

        KASSERT(m->m_pkthdr.len >= sizeof(hdr));
        m_copydata(m, 0, sizeof(hdr), &hdr);
        m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
        if (hdr.type != HCI_ACL_DATA_PKT) {
                printf("%s: bad ACL packet type\n", unit->hci_devname);
                goto bad;
        }

        if (m->m_pkthdr.len != le16toh(hdr.length)) {
                printf("%s: bad ACL packet length (%d != %d)\n",
                        unit->hci_devname, m->m_pkthdr.len, le16toh(hdr.length));
                goto bad;
        }
#endif

        hdr.length = le16toh(hdr.length);
        hdr.con_handle = le16toh(hdr.con_handle);
        handle = HCI_CON_HANDLE(hdr.con_handle);
        pb = HCI_PB_FLAG(hdr.con_handle);

        link = hci_link_lookup_handle(unit, handle);
        if (link == NULL) {
                hci_discon_cp cp;

                DPRINTF("%s: dumping packet for unknown handle #%d\n",
                        unit->hci_devname, handle);

                /*
                 * There is no way to find out what this connection handle is
                 * for, so just get rid of it. This can happen if a USB dongle
                 * is plugged into a self-powered hub and does not reset when
                 * the system is shut down.
                 */
                cp.con_handle = htole16(handle);
                cp.reason = 0x13;       /* "Remote User Terminated Connection" */
                hci_send_cmd(unit, HCI_CMD_DISCONNECT, &cp, sizeof(cp));
                goto bad;
        }

        switch (pb) {
        case HCI_PACKET_START:
                if (link->hl_rxp != NULL)
                        printf("%s: dropped incomplete ACL packet\n",
                                unit->hci_devname);

                if (m->m_pkthdr.len < sizeof(l2cap_hdr_t)) {
                        printf("%s: short ACL packet\n",
                                unit->hci_devname);

                        goto bad;
                }

                link->hl_rxp = m;
                got = m->m_pkthdr.len;
                break;

        case HCI_PACKET_FRAGMENT:
                if (link->hl_rxp == NULL) {
                        printf("%s: unexpected packet fragment\n",
                                unit->hci_devname);

                        goto bad;
                }

                got = m->m_pkthdr.len + link->hl_rxp->m_pkthdr.len;
                m_cat(link->hl_rxp, m);
                m = link->hl_rxp;
                m->m_pkthdr.len = got;
                break;

        default:
                printf("%s: unknown packet type\n",
                        unit->hci_devname);

                goto bad;
        }

        m_copydata(m, 0, sizeof(want), &want);
        want = le16toh(want) + sizeof(l2cap_hdr_t) - got;

        if (want > 0)
                return;

        link->hl_rxp = NULL;

        if (want == 0) {
                l2cap_recv_frame(m, link);
                return;
        }

bad:
        m_freem(m);
}

/*
 * Send ACL data on link
 *
 * We must fragment packets into chunks of no more than unit->hci_max_acl_size
 * and prepend the relevant ACL header to each fragment. We keep a PDU
 * structure attached to the link, so that completed fragments can be marked
 * off and more data requested from above once the PDU is sent.
 */
int
hci_acl_send(struct mbuf *m, struct hci_link *link,
                struct l2cap_channel *chan)
{
        struct l2cap_pdu *pdu;
        struct mbuf *n = NULL;
        int plen, mlen, num = 0;

        KASSERT(link);
        KASSERT(m);
        KASSERT(m->m_flags & M_PKTHDR);
        KASSERT(m->m_pkthdr.len > 0);

        if (link->hl_state == HCI_LINK_CLOSED) {
                m_freem(m);
                return ENETDOWN;
        }

        pdu = pool_get(&l2cap_pdu_pool, PR_NOWAIT);
        if (pdu == NULL)
                goto nomem;

        pdu->lp_chan = chan;
        pdu->lp_pending = 0;
        MBUFQ_INIT(&pdu->lp_data);

        plen = m->m_pkthdr.len;
        mlen = link->hl_unit->hci_max_acl_size;

        DPRINTFN(5, "%s: handle #%d, plen = %d, max = %d\n",
                link->hl_unit->hci_devname, link->hl_handle, plen, mlen);

        while (plen > 0) {
                if (plen > mlen) {
                        n = m_split(m, mlen, M_DONTWAIT);
                        if (n == NULL)
                                goto nomem;
                } else {
                        mlen = plen;
                }

                if (num++ == 0)
                        m->m_flags |= M_PROTO1;         /* tag first fragment */

                DPRINTFN(10, "chunk of %d (plen = %d) bytes\n", mlen, plen);
                MBUFQ_ENQUEUE(&pdu->lp_data, m);
                m = n;
                plen -= mlen;
        }

        TAILQ_INSERT_TAIL(&link->hl_txq, pdu, lp_next);
        link->hl_txqlen += num;

        hci_acl_start(link);

        return 0;

nomem:
        if (m) m_freem(m);
        if (n) m_freem(n);
        if (pdu) {
                MBUFQ_DRAIN(&pdu->lp_data);
                pool_put(&l2cap_pdu_pool, pdu);
        }

        return ENOMEM;
}

/*
 * Start sending ACL data on link.
 *
 * We may use all the available packet slots. The reason that we add
 * the ACL encapsulation here rather than in hci_acl_send() is that L2CAP
 * signal packets may be queued before the handle is given to us..
 *
 * this is called from hci_acl_send() above, and the event processing
 * code (for CON_COMPL and NUM_COMPL_PKTS)
 */
void
hci_acl_start(struct hci_link *link)
{
        struct hci_unit *unit;
        hci_acldata_hdr_t *hdr;
        struct l2cap_pdu *pdu;
        struct mbuf *m;
        uint16_t handle;

        KASSERT(link);

        unit = link->hl_unit;
        KASSERT(unit);

        /* this is mainly to block ourselves (below) */
        if (link->hl_state != HCI_LINK_OPEN)
                return;

        if (link->hl_txqlen == 0 || unit->hci_num_acl_pkts == 0)
                return;

        /* find first PDU with data to send */
        pdu = TAILQ_FIRST(&link->hl_txq);
        for (;;) {
                if (pdu == NULL)
                        return;

                if (MBUFQ_FIRST(&pdu->lp_data) != NULL)
                        break;

                pdu = TAILQ_NEXT(pdu, lp_next);
        }

        while (unit->hci_num_acl_pkts > 0) {
                MBUFQ_DEQUEUE(&pdu->lp_data, m);
                KASSERT(m != NULL);

                if (m->m_flags & M_PROTO1)
                        handle = HCI_MK_CON_HANDLE(link->hl_handle,
                                                HCI_PACKET_START, 0);
                else
                        handle = HCI_MK_CON_HANDLE(link->hl_handle,
                                                HCI_PACKET_FRAGMENT, 0);

                M_PREPEND(m, sizeof(*hdr), M_DONTWAIT);
                if (m == NULL)
                        break;

                hdr = mtod(m, hci_acldata_hdr_t *);
                hdr->type = HCI_ACL_DATA_PKT;
                hdr->con_handle = htole16(handle);
                hdr->length = htole16(m->m_pkthdr.len - sizeof(*hdr));

                link->hl_txqlen--;
                pdu->lp_pending++;

                hci_output_acl(unit, m);

                if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
                        if (pdu->lp_chan) {
                                /*
                                 * This should enable streaming of PDUs - when
                                 * we have placed all the fragments on the acl
                                 * output queue, we trigger the L2CAP layer to
                                 * send us down one more. Use a false state so
                                 * we don't run into ourselves coming back from
                                 * the future..
                                 */
                                link->hl_state = HCI_LINK_BLOCK;
                                l2cap_start(pdu->lp_chan);
                                link->hl_state = HCI_LINK_OPEN;
                        }

                        pdu = TAILQ_NEXT(pdu, lp_next);
                        if (pdu == NULL)
                                break;
                }
        }

        /*
         * We had our turn now, move to the back of the queue to let
         * other links have a go at the output buffers..
         */
        if (TAILQ_NEXT(link, hl_next)) {
                TAILQ_REMOVE(&unit->hci_links, link, hl_next);
                TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
        }
}

/*
 * Confirm ACL packets cleared from Controller buffers. We scan our PDU
 * list to clear pending fragments and signal upstream for more data
 * when a PDU is complete.
 */
void
hci_acl_complete(struct hci_link *link, int num)
{
        struct l2cap_pdu *pdu;
        struct l2cap_channel *chan;

        DPRINTFN(5, "handle #%d (%d)\n", link->hl_handle, num);

        while (num > 0) {
                pdu = TAILQ_FIRST(&link->hl_txq);
                if (pdu == NULL) {
                        printf("%s: %d packets completed on handle #%x "
                                "but none pending!\n",
                                link->hl_unit->hci_devname, num,
                                link->hl_handle);
                        return;
                }

                if (num >= pdu->lp_pending) {
                        num -= pdu->lp_pending;
                        pdu->lp_pending = 0;

                        if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
                                TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
                                chan = pdu->lp_chan;
                                if (chan != NULL) {
                                        chan->lc_pending--;
                                        (*chan->lc_proto->complete)
                                                        (chan->lc_upper, 1);

                                        if (chan->lc_pending == 0)
                                                l2cap_start(chan);
                                }

                                pool_put(&l2cap_pdu_pool, pdu);
                        }
                } else {
                        pdu->lp_pending -= num;
                        num = 0;
                }
        }
}

/*******************************************************************************
 *
 * HCI SCO Connections
 */

/*
 * Incoming SCO Connection. We check the list for anybody willing
 * to take it.
 */
struct hci_link *
hci_sco_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
        struct sockaddr_bt laddr, raddr;
        struct sco_pcb *pcb, *new;
        struct hci_link *sco, *acl;

        memset(&laddr, 0, sizeof(laddr));
        laddr.bt_len = sizeof(laddr);
        laddr.bt_family = AF_BLUETOOTH;
        bdaddr_copy(&laddr.bt_bdaddr, &unit->hci_bdaddr);

        memset(&raddr, 0, sizeof(raddr));
        raddr.bt_len = sizeof(raddr);
        raddr.bt_family = AF_BLUETOOTH;
        bdaddr_copy(&raddr.bt_bdaddr, bdaddr);

        /*
         * There should already be an ACL link up and running before
         * the controller sends us SCO connection requests, but you
         * never know..
         */
        acl = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
        if (acl == NULL || acl->hl_state != HCI_LINK_OPEN)
                return NULL;

        LIST_FOREACH(pcb, &sco_pcb, sp_next) {
                if ((pcb->sp_flags & SP_LISTENING) == 0)
                        continue;

                new = (*pcb->sp_proto->newconn)(pcb->sp_upper, &laddr, &raddr);
                if (new == NULL)
                        continue;

                /*
                 * Ok, got new pcb so we can start a new link and fill
                 * in all the details.
                 */
                bdaddr_copy(&new->sp_laddr, &unit->hci_bdaddr);
                bdaddr_copy(&new->sp_raddr, bdaddr);

                sco = hci_link_alloc(unit);
                if (sco == NULL) {
                        sco_detach(&new);
                        return NULL;
                }

                sco->hl_type = HCI_LINK_SCO;
                bdaddr_copy(&sco->hl_bdaddr, bdaddr);

                sco->hl_link = hci_acl_open(unit, bdaddr);
                KASSERT(sco->hl_link == acl);

                sco->hl_sco = new;
                new->sp_link = sco;

                new->sp_mtu = unit->hci_max_sco_size;
                return sco;
        }

        return NULL;
}

/*
 * receive SCO packet, we only need to strip the header and send
 * it to the right handler
 */
void
hci_sco_recv(struct mbuf *m, struct hci_unit *unit)
{
        struct hci_link *link;
        hci_scodata_hdr_t hdr;
        uint16_t handle;

        KASSERT(m);
        KASSERT(unit);

        KASSERT(m->m_pkthdr.len >= sizeof(hdr));
        m_copydata(m, 0, sizeof(hdr), &hdr);
        m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
        if (hdr.type != HCI_SCO_DATA_PKT) {
                printf("%s: bad SCO packet type\n", unit->hci_devname);
                goto bad;
        }

        if (m->m_pkthdr.len != hdr.length) {
                printf("%s: bad SCO packet length (%d != %d)\n",
                        unit->hci_devname, m->m_pkthdr.len, hdr.length);
                goto bad;
        }
#endif

        hdr.con_handle = le16toh(hdr.con_handle);
        handle = HCI_CON_HANDLE(hdr.con_handle);

        link = hci_link_lookup_handle(unit, handle);
        if (link == NULL || link->hl_type == HCI_LINK_ACL) {
                DPRINTF("%s: dumping packet for unknown handle #%d\n",
                        unit->hci_devname, handle);

                goto bad;
        }

        (*link->hl_sco->sp_proto->input)(link->hl_sco->sp_upper, m);
        return;

bad:
        m_freem(m);
}

void
hci_sco_start(struct hci_link *link __unused)
{
}

/*
 * SCO packets have completed at the controller, so we can
 * signal up to free the buffer space.
 */
void
hci_sco_complete(struct hci_link *link, int num)
{

        DPRINTFN(5, "handle #%d (num=%d)\n", link->hl_handle, num);
        link->hl_sco->sp_pending--;
        (*link->hl_sco->sp_proto->complete)(link->hl_sco->sp_upper, num);
}

/*******************************************************************************
 *
 * Generic HCI Connection alloc/free/lookup etc
 */

struct hci_link *
hci_link_alloc(struct hci_unit *unit)
{
        struct hci_link *link;

        KASSERT(unit);

        link = malloc(sizeof(struct hci_link), M_BLUETOOTH, M_NOWAIT | M_ZERO);
        if (link == NULL)
                return NULL;

        link->hl_unit = unit;
        link->hl_state = HCI_LINK_CLOSED;

        /* init ACL portion */
        callout_init(&link->hl_expire);
        callout_setfunc(&link->hl_expire, hci_acl_timeout, link);

        TAILQ_INIT(&link->hl_txq);      /* outgoing packets */
        TAILQ_INIT(&link->hl_reqs);     /* request queue */

        link->hl_mtu = L2CAP_MTU_DEFAULT;               /* L2CAP signal mtu */
        link->hl_flush = L2CAP_FLUSH_TIMO_DEFAULT;      /* flush timeout */

        /* init SCO portion */
        MBUFQ_INIT(&link->hl_data);

        /* attach to unit */
        TAILQ_INSERT_HEAD(&unit->hci_links, link, hl_next);
        return link;
}

void
hci_link_free(struct hci_link *link, int err)
{
        struct l2cap_req *req;
        struct l2cap_pdu *pdu;
        struct l2cap_channel *chan, *next;

        KASSERT(link);

        DPRINTF("#%d, type = %d, state = %d, refcnt = %d\n",
                link->hl_handle, link->hl_type,
                link->hl_state, link->hl_refcnt);

        /* ACL reference count */
        if (link->hl_refcnt > 0) {
                next = LIST_FIRST(&l2cap_active_list);
                while ((chan = next) != NULL) {
                        next = LIST_NEXT(chan, lc_ncid);
                        if (chan->lc_link == link)
                                l2cap_close(chan, err);
                }
        }
        KASSERT(link->hl_refcnt == 0);

        /* ACL L2CAP requests.. */
        while ((req = TAILQ_FIRST(&link->hl_reqs)) != NULL)
                l2cap_request_free(req);

        KASSERT(TAILQ_EMPTY(&link->hl_reqs));

        /* ACL outgoing data queue */
        while ((pdu = TAILQ_FIRST(&link->hl_txq)) != NULL) {
                TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
                MBUFQ_DRAIN(&pdu->lp_data);
                if (pdu->lp_pending)
                        link->hl_unit->hci_num_acl_pkts += pdu->lp_pending;

                pool_put(&l2cap_pdu_pool, pdu);
        }

        KASSERT(TAILQ_EMPTY(&link->hl_txq));

        /* ACL incoming data packet */
        if (link->hl_rxp != NULL) {
                m_freem(link->hl_rxp);
                link->hl_rxp = NULL;
        }

        /* SCO master ACL link */
        if (link->hl_link != NULL) {
                hci_acl_close(link->hl_link, err);
                link->hl_link = NULL;
        }

        /* SCO pcb */
        if (link->hl_sco != NULL) {
                struct sco_pcb *pcb;

                pcb = link->hl_sco;
                pcb->sp_link = NULL;
                link->hl_sco = NULL;
                (*pcb->sp_proto->disconnected)(pcb->sp_upper, err);
        }

        /* flush any SCO data */
        MBUFQ_DRAIN(&link->hl_data);

        /*
         * Halt the callout - if it's already running we cannot free the
         * link structure, but the timeout function will call us back in
         * any case.
         */
        link->hl_state = HCI_LINK_CLOSED;
        callout_stop(&link->hl_expire);
        if (callout_invoking(&link->hl_expire))
                return;

        TAILQ_REMOVE(&link->hl_unit->hci_links, link, hl_next);
        free(link, M_BLUETOOTH);
}

/*
 * Lookup HCI link by address and type.
 * Note that for SCO links there may be more than one link per address,
 * so we only return links with no handle (i.e. new links).
 */
struct hci_link *
hci_link_lookup_bdaddr(struct hci_unit *unit, bdaddr_t *bdaddr, uint16_t type)
{
        struct hci_link *link;

        KASSERT(unit);
        KASSERT(bdaddr);

        TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
                if (link->hl_type != type)
                        continue;

                if (type == HCI_LINK_SCO && link->hl_handle != 0)
                        continue;

                if (bdaddr_same(&link->hl_bdaddr, bdaddr))
                        break;
        }

        return link;
}

struct hci_link *
hci_link_lookup_handle(struct hci_unit *unit, uint16_t handle)
{
        struct hci_link *link;

        KASSERT(unit);

        TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
                if (handle == link->hl_handle)
                        break;
        }

        return link;
}
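
/*
 * A minimal usage sketch, compiled out by default: assuming a valid unit,
 * peer address, mbuf and channel are at hand, it shows how an upper layer
 * might drive the ACL link API above - hci_acl_open() to find or create the
 * link, hci_acl_send() to queue a PDU for fragmentation, and hci_acl_close()
 * to drop the reference. The guard macro and function name are hypothetical
 * illustrations, not part of the stack.
 */
#ifdef HCI_LINK_EXAMPLE         /* hypothetical guard, never defined */
static int
example_acl_transmit(struct hci_unit *unit, bdaddr_t *peer,
                struct mbuf *m, struct l2cap_channel *chan)
{
        struct hci_link *link;
        int err;

        /* find an existing ACL link to the peer, or start a new one */
        link = hci_acl_open(unit, peer);
        if (link == NULL) {
                m_freem(m);
                return ENOMEM;
        }

        /*
         * queue the PDU; hci_acl_send() splits it into fragments of at
         * most hci_max_acl_size bytes and consumes the mbuf either way
         */
        err = hci_acl_send(m, link, chan);

        /*
         * drop our reference (the L2CAP layer would normally hold it for
         * the lifetime of the channel); with hci_acl_expiry > 0 an idle
         * link lingers until hci_acl_timeout() reclaims it
         */
        hci_acl_close(link, err);
        return err;
}
#endif  /* HCI_LINK_EXAMPLE */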