xref: /netbsd-src/sys/netbt/hci_event.c (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /*	$NetBSD: hci_event.c,v 1.12 2007/11/28 21:46:52 plunky Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005 Iain Hibbert.
5  * Copyright (c) 2006 Itronix Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of Itronix Inc. may not be used to endorse
17  *    or promote products derived from this software without specific
18  *    prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
24  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  * ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: hci_event.c,v 1.12 2007/11/28 21:46:52 plunky Exp $");
35 
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/proc.h>
41 #include <sys/systm.h>
42 
43 #include <netbt/bluetooth.h>
44 #include <netbt/hci.h>
45 #include <netbt/sco.h>
46 
47 static void hci_event_inquiry_result(struct hci_unit *, struct mbuf *);
48 static void hci_event_rssi_result(struct hci_unit *, struct mbuf *);
49 static void hci_event_command_status(struct hci_unit *, struct mbuf *);
50 static void hci_event_command_compl(struct hci_unit *, struct mbuf *);
51 static void hci_event_con_compl(struct hci_unit *, struct mbuf *);
52 static void hci_event_discon_compl(struct hci_unit *, struct mbuf *);
53 static void hci_event_con_req(struct hci_unit *, struct mbuf *);
54 static void hci_event_num_compl_pkts(struct hci_unit *, struct mbuf *);
55 static void hci_event_auth_compl(struct hci_unit *, struct mbuf *);
56 static void hci_event_encryption_change(struct hci_unit *, struct mbuf *);
57 static void hci_event_change_con_link_key_compl(struct hci_unit *, struct mbuf *);
58 static void hci_event_read_clock_offset_compl(struct hci_unit *, struct mbuf *);
59 static void hci_cmd_read_bdaddr(struct hci_unit *, struct mbuf *);
60 static void hci_cmd_read_buffer_size(struct hci_unit *, struct mbuf *);
61 static void hci_cmd_read_local_features(struct hci_unit *, struct mbuf *);
62 static void hci_cmd_reset(struct hci_unit *, struct mbuf *);
63 
64 #ifdef BLUETOOTH_DEBUG
65 int bluetooth_debug;
66 
67 static const char *hci_eventnames[] = {
68 /* 0x00 */ "NULL",
69 /* 0x01 */ "INQUIRY COMPLETE",
70 /* 0x02 */ "INQUIRY RESULT",
71 /* 0x03 */ "CONN COMPLETE",
72 /* 0x04 */ "CONN REQ",
73 /* 0x05 */ "DISCONN COMPLETE",
74 /* 0x06 */ "AUTH COMPLETE",
75 /* 0x07 */ "REMOTE NAME REQ COMPLETE",
76 /* 0x08 */ "ENCRYPTION CHANGE",
77 /* 0x09 */ "CHANGE CONN LINK KEY COMPLETE",
78 /* 0x0a */ "MASTER LINK KEY COMPLETE",
79 /* 0x0b */ "READ REMOTE FEATURES COMPLETE",
80 /* 0x0c */ "READ REMOTE VERSION INFO COMPLETE",
81 /* 0x0d */ "QoS SETUP COMPLETE",
82 /* 0x0e */ "COMMAND COMPLETE",
83 /* 0x0f */ "COMMAND STATUS",
84 /* 0x10 */ "HARDWARE ERROR",
85 /* 0x11 */ "FLUSH OCCUR",
86 /* 0x12 */ "ROLE CHANGE",
87 /* 0x13 */ "NUM COMPLETED PACKETS",
88 /* 0x14 */ "MODE CHANGE",
89 /* 0x15 */ "RETURN LINK KEYS",
90 /* 0x16 */ "PIN CODE REQ",
91 /* 0x17 */ "LINK KEY REQ",
92 /* 0x18 */ "LINK KEY NOTIFICATION",
93 /* 0x19 */ "LOOPBACK COMMAND",
94 /* 0x1a */ "DATA BUFFER OVERFLOW",
95 /* 0x1b */ "MAX SLOT CHANGE",
96 /* 0x1c */ "READ CLOCK OFFSET COMPLETE",
97 /* 0x1d */ "CONN PKT TYPE CHANGED",
98 /* 0x1e */ "QOS VIOLATION",
99 /* 0x1f */ "PAGE SCAN MODE CHANGE",
100 /* 0x20 */ "PAGE SCAN REP MODE CHANGE",
101 /* 0x21 */ "FLOW SPECIFICATION COMPLETE",
102 /* 0x22 */ "RSSI RESULT",
103 /* 0x23 */ "READ REMOTE EXT FEATURES"
104 };
105 
106 static const char *
107 hci_eventstr(unsigned int event)
108 {
109 
110 	if (event < (sizeof(hci_eventnames) / sizeof(*hci_eventnames)))
111 		return hci_eventnames[event];
112 
113 	switch (event) {
114 	case HCI_EVENT_SCO_CON_COMPL:	/* 0x2c */
115 		return "SCO CON COMPLETE";
116 
117 	case HCI_EVENT_SCO_CON_CHANGED:	/* 0x2d */
118 		return "SCO CON CHANGED";
119 
120 	case HCI_EVENT_BT_LOGO:		/* 0xfe */
121 		return "BT_LOGO";
122 
123 	case HCI_EVENT_VENDOR:		/* 0xff */
124 		return "VENDOR";
125 	}
126 
127 	return "UNRECOGNISED";
128 }
129 #endif	/* BLUETOOTH_DEBUG */
130 
131 /*
132  * process HCI Events
133  *
134  * We free the mbuf at the end; the sub-functions need not
135  * handle that. We largely assume that the device sends us
136  * valid events.
137  */
138 void
139 hci_event(struct mbuf *m, struct hci_unit *unit)
140 {
141 	hci_event_hdr_t hdr;
142 
143 	KASSERT(m->m_flags & M_PKTHDR);
144 
145 	KASSERT(m->m_pkthdr.len >= sizeof(hdr));
146 	m_copydata(m, 0, sizeof(hdr), &hdr);
147 	m_adj(m, sizeof(hdr));
148 
149 	KASSERT(hdr.type == HCI_EVENT_PKT);
150 
151 	DPRINTFN(1, "(%s) event %s\n",
152 	    device_xname(unit->hci_dev), hci_eventstr(hdr.event));
153 
154 	switch(hdr.event) {
155 	case HCI_EVENT_COMMAND_STATUS:
156 		hci_event_command_status(unit, m);
157 		break;
158 
159 	case HCI_EVENT_COMMAND_COMPL:
160 		hci_event_command_compl(unit, m);
161 		break;
162 
163 	case HCI_EVENT_NUM_COMPL_PKTS:
164 		hci_event_num_compl_pkts(unit, m);
165 		break;
166 
167 	case HCI_EVENT_INQUIRY_RESULT:
168 		hci_event_inquiry_result(unit, m);
169 		break;
170 
171 	case HCI_EVENT_RSSI_RESULT:
172 		hci_event_rssi_result(unit, m);
173 		break;
174 
175 	case HCI_EVENT_CON_COMPL:
176 		hci_event_con_compl(unit, m);
177 		break;
178 
179 	case HCI_EVENT_DISCON_COMPL:
180 		hci_event_discon_compl(unit, m);
181 		break;
182 
183 	case HCI_EVENT_CON_REQ:
184 		hci_event_con_req(unit, m);
185 		break;
186 
187 	case HCI_EVENT_AUTH_COMPL:
188 		hci_event_auth_compl(unit, m);
189 		break;
190 
191 	case HCI_EVENT_ENCRYPTION_CHANGE:
192 		hci_event_encryption_change(unit, m);
193 		break;
194 
195 	case HCI_EVENT_CHANGE_CON_LINK_KEY_COMPL:
196 		hci_event_change_con_link_key_compl(unit, m);
197 		break;
198 
199 	case HCI_EVENT_READ_CLOCK_OFFSET_COMPL:
200 		hci_event_read_clock_offset_compl(unit, m);
201 		break;
202 
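	/* events we recognise but do not currently act upon */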
203 	case HCI_EVENT_SCO_CON_COMPL:
204 	case HCI_EVENT_INQUIRY_COMPL:
205 	case HCI_EVENT_REMOTE_NAME_REQ_COMPL:
206 	case HCI_EVENT_MASTER_LINK_KEY_COMPL:
207 	case HCI_EVENT_READ_REMOTE_FEATURES_COMPL:
208 	case HCI_EVENT_READ_REMOTE_VER_INFO_COMPL:
209 	case HCI_EVENT_QOS_SETUP_COMPL:
210 	case HCI_EVENT_HARDWARE_ERROR:
211 	case HCI_EVENT_FLUSH_OCCUR:
212 	case HCI_EVENT_ROLE_CHANGE:
213 	case HCI_EVENT_MODE_CHANGE:
214 	case HCI_EVENT_RETURN_LINK_KEYS:
215 	case HCI_EVENT_PIN_CODE_REQ:
216 	case HCI_EVENT_LINK_KEY_REQ:
217 	case HCI_EVENT_LINK_KEY_NOTIFICATION:
218 	case HCI_EVENT_LOOPBACK_COMMAND:
219 	case HCI_EVENT_DATA_BUFFER_OVERFLOW:
220 	case HCI_EVENT_MAX_SLOT_CHANGE:
221 	case HCI_EVENT_CON_PKT_TYPE_CHANGED:
222 	case HCI_EVENT_QOS_VIOLATION:
223 	case HCI_EVENT_PAGE_SCAN_MODE_CHANGE:
224 	case HCI_EVENT_PAGE_SCAN_REP_MODE_CHANGE:
225 	case HCI_EVENT_FLOW_SPECIFICATION_COMPL:
226 	case HCI_EVENT_READ_REMOTE_EXTENDED_FEATURES:
227 	case HCI_EVENT_SCO_CON_CHANGED:
228 	case HCI_EVENT_BT_LOGO:
229 	case HCI_EVENT_VENDOR:
230 		break;
231 
232 	default:
233 		UNKNOWN(hdr.event);
234 		break;
235 	}
236 
237 	m_freem(m);
238 }
239 
240 /*
241  * Command Status
242  *
243  * Update our record of num_cmd_pkts then post-process any pending commands
244  * and optionally restart cmd output on the unit.
245  */
246 static void
247 hci_event_command_status(struct hci_unit *unit, struct mbuf *m)
248 {
249 	hci_command_status_ep ep;
250 
251 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
252 	m_copydata(m, 0, sizeof(ep), &ep);
253 	m_adj(m, sizeof(ep));
254 
255 	DPRINTFN(1, "(%s) opcode (%03x|%04x) status = 0x%x num_cmd_pkts = %d\n",
256 		device_xname(unit->hci_dev),
257 		HCI_OGF(le16toh(ep.opcode)), HCI_OCF(le16toh(ep.opcode)),
258 		ep.status,
259 		ep.num_cmd_pkts);
260 
261 	if (ep.status > 0)
262 		aprint_error_dev(unit->hci_dev,
263 		    "CommandStatus opcode (%03x|%04x) failed (status=0x%02x)\n",
264 		    HCI_OGF(le16toh(ep.opcode)), HCI_OCF(le16toh(ep.opcode)),
265 		    ep.status);
266 
267 	unit->hci_num_cmd_pkts = ep.num_cmd_pkts;
268 
269 	/*
270 	 * post processing of pending commands
271 	 */
272 	switch(le16toh(ep.opcode)) {
273 	default:
274 		break;
275 	}
276 
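	/*
	 * while the controller can accept more commands, send any
	 * that were queued waiting for available command buffers
	 */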
277 	while (unit->hci_num_cmd_pkts > 0 && MBUFQ_FIRST(&unit->hci_cmdwait)) {
278 		MBUFQ_DEQUEUE(&unit->hci_cmdwait, m);
279 		hci_output_cmd(unit, m);
280 	}
281 }
282 
283 /*
284  * Command Complete
285  *
286  * Update our record of num_cmd_pkts then handle the completed command,
287  * and optionally restart cmd output on the unit.
288  */
289 static void
290 hci_event_command_compl(struct hci_unit *unit, struct mbuf *m)
291 {
292 	hci_command_compl_ep ep;
293 	hci_status_rp rp;
294 
295 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
296 	m_copydata(m, 0, sizeof(ep), &ep);
297 	m_adj(m, sizeof(ep));
298 
299 	DPRINTFN(1, "(%s) opcode (%03x|%04x) num_cmd_pkts = %d\n",
300 		device_xname(unit->hci_dev),
301 		HCI_OGF(le16toh(ep.opcode)), HCI_OCF(le16toh(ep.opcode)),
302 		ep.num_cmd_pkts);
303 
304 	/*
305 	 * This may not be completely correct: it is not guaranteed
306 	 * that a command_complete packet will contain a status field,
307 	 * though most seem to.
308 	 */
309 	m_copydata(m, 0, sizeof(rp), &rp);
310 	if (rp.status > 0)
311 		aprint_error_dev(unit->hci_dev,
312 		    "CommandComplete opcode (%03x|%04x) failed (status=0x%02x)\n",
313 		    HCI_OGF(le16toh(ep.opcode)), HCI_OCF(le16toh(ep.opcode)),
314 		    rp.status);
315 
316 	unit->hci_num_cmd_pkts = ep.num_cmd_pkts;
317 
318 	/*
319 	 * post processing of completed commands
320 	 */
321 	switch(le16toh(ep.opcode)) {
322 	case HCI_CMD_READ_BDADDR:
323 		hci_cmd_read_bdaddr(unit, m);
324 		break;
325 
326 	case HCI_CMD_READ_BUFFER_SIZE:
327 		hci_cmd_read_buffer_size(unit, m);
328 		break;
329 
330 	case HCI_CMD_READ_LOCAL_FEATURES:
331 		hci_cmd_read_local_features(unit, m);
332 		break;
333 
334 	case HCI_CMD_RESET:
335 		hci_cmd_reset(unit, m);
336 		break;
337 
338 	default:
339 		break;
340 	}
341 
342 	while (unit->hci_num_cmd_pkts > 0 && MBUFQ_FIRST(&unit->hci_cmdwait)) {
343 		MBUFQ_DEQUEUE(&unit->hci_cmdwait, m);
344 		hci_output_cmd(unit, m);
345 	}
346 }
347 
348 /*
349  * Number of Completed Packets
350  *
351  * This is sent periodically by the Controller to tell us how many
352  * buffers have been freed up and which handle was using them. From
353  * this we determine which type of buffer each was and add the count
354  * back into the relevant packet counter, then restart output on
355  * links that have halted.
356  */
357 static void
358 hci_event_num_compl_pkts(struct hci_unit *unit, struct mbuf *m)
359 {
360 	hci_num_compl_pkts_ep ep;
361 	struct hci_link *link, *next;
362 	uint16_t handle, num;
363 	int num_acl = 0, num_sco = 0;
364 
365 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
366 	m_copydata(m, 0, sizeof(ep), &ep);
367 	m_adj(m, sizeof(ep));
368 
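	/*
	 * each entry gives a connection handle and the number of
	 * packets the controller has completed on it
	 */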
369 	while (ep.num_con_handles--) {
370 		m_copydata(m, 0, sizeof(handle), &handle);
371 		m_adj(m, sizeof(handle));
372 		handle = le16toh(handle);
373 
374 		m_copydata(m, 0, sizeof(num), &num);
375 		m_adj(m, sizeof(num));
376 		num = le16toh(num);
377 
378 		link = hci_link_lookup_handle(unit, handle);
379 		if (link) {
380 			if (link->hl_type == HCI_LINK_ACL) {
381 				num_acl += num;
382 				hci_acl_complete(link, num);
383 			} else {
384 				num_sco += num;
385 				hci_sco_complete(link, num);
386 			}
387 		} else {
388 			/* XXX need to issue Read_Buffer_Size or Reset? */
389 			aprint_error_dev(unit->hci_dev,
390 			    "unknown handle %d! (losing track of %d packet buffer%s)\n",
391 			    handle, num, (num == 1 ? "" : "s"));
392 		}
393 	}
394 
395 	/*
396 	 * Move up any queued packets. When a link has sent data, it will move
397 	 * to the back of the queue; technically, if a link had something
398 	 * to send and there were still buffers available, it could get
399 	 * started twice, but it seemed more important to handle higher loads
400 	 * fairly than to worry about wasting cycles when we are not busy.
401 	 */
402 
403 	unit->hci_num_acl_pkts += num_acl;
404 	unit->hci_num_sco_pkts += num_sco;
405 
406 	link = TAILQ_FIRST(&unit->hci_links);
407 	while (link && (unit->hci_num_acl_pkts > 0 || unit->hci_num_sco_pkts > 0)) {
408 		next = TAILQ_NEXT(link, hl_next);
409 
410 		if (link->hl_type == HCI_LINK_ACL) {
411 			if (unit->hci_num_acl_pkts > 0 && link->hl_txqlen > 0)
412 				hci_acl_start(link);
413 		} else {
414 			if (unit->hci_num_sco_pkts > 0 && link->hl_txqlen > 0)
415 				hci_sco_start(link);
416 		}
417 
418 		link = next;
419 	}
420 }
421 
422 /*
423  * Inquiry Result
424  *
425  * Keep a note of devices seen, so we know which unit to use
426  * on outgoing connections.
427  */
428 static void
429 hci_event_inquiry_result(struct hci_unit *unit, struct mbuf *m)
430 {
431 	hci_inquiry_result_ep ep;
432 	hci_inquiry_response ir;
433 	struct hci_memo *memo;
434 
435 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
436 	m_copydata(m, 0, sizeof(ep), &ep);
437 	m_adj(m, sizeof(ep));
438 
439 	DPRINTFN(1, "%d response%s\n", ep.num_responses,
440 				(ep.num_responses == 1 ? "" : "s"));
441 
442 	while(ep.num_responses--) {
443 		KASSERT(m->m_pkthdr.len >= sizeof(ir));
444 		m_copydata(m, 0, sizeof(ir), &ir);
445 		m_adj(m, sizeof(ir));
446 
447 		DPRINTFN(1, "bdaddr %02x:%02x:%02x:%02x:%02x:%02x\n",
448 			ir.bdaddr.b[5], ir.bdaddr.b[4], ir.bdaddr.b[3],
449 			ir.bdaddr.b[2], ir.bdaddr.b[1], ir.bdaddr.b[0]);
450 
451 		memo = hci_memo_new(unit, &ir.bdaddr);
452 		if (memo != NULL) {
453 			memo->page_scan_rep_mode = ir.page_scan_rep_mode;
454 			memo->page_scan_mode = ir.page_scan_mode;
455 			memo->clock_offset = ir.clock_offset;
456 		}
457 	}
458 }
459 
460 /*
461  * Inquiry Result with RSSI
462  *
463  * As above, but a different packet format is used when RSSI results are enabled.
464  */
465 static void
466 hci_event_rssi_result(struct hci_unit *unit, struct mbuf *m)
467 {
468 	hci_rssi_result_ep ep;
469 	hci_rssi_response rr;
470 	struct hci_memo *memo;
471 
472 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
473 	m_copydata(m, 0, sizeof(ep), &ep);
474 	m_adj(m, sizeof(ep));
475 
476 	DPRINTFN(1, "%d response%s\n", ep.num_responses,
477 				(ep.num_responses == 1 ? "" : "s"));
478 
479 	while(ep.num_responses--) {
480 		KASSERT(m->m_pkthdr.len >= sizeof(rr));
481 		m_copydata(m, 0, sizeof(rr), &rr);
482 		m_adj(m, sizeof(rr));
483 
484 		DPRINTFN(1, "bdaddr %02x:%02x:%02x:%02x:%02x:%02x\n",
485 			rr.bdaddr.b[5], rr.bdaddr.b[4], rr.bdaddr.b[3],
486 			rr.bdaddr.b[2], rr.bdaddr.b[1], rr.bdaddr.b[0]);
487 
488 		memo = hci_memo_new(unit, &rr.bdaddr);
489 		if (memo != NULL) {
490 			memo->page_scan_rep_mode = rr.page_scan_rep_mode;
491 			memo->page_scan_mode = 0;
492 			memo->clock_offset = rr.clock_offset;
493 		}
494 	}
495 }
496 
497 /*
498  * Connection Complete
499  *
500  * Sent to us when a connection is made. If there is no link
501  * structure already allocated for this, we must have changed
502  * our mind, so just disconnect.
503  */
504 static void
505 hci_event_con_compl(struct hci_unit *unit, struct mbuf *m)
506 {
507 	hci_con_compl_ep ep;
508 	hci_write_link_policy_settings_cp cp;
509 	struct hci_link *link;
510 	int err;
511 
512 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
513 	m_copydata(m, 0, sizeof(ep), &ep);
514 	m_adj(m, sizeof(ep));
515 
516 	DPRINTFN(1, "(%s) %s connection complete for "
517 		"%02x:%02x:%02x:%02x:%02x:%02x status %#x\n",
518 		device_xname(unit->hci_dev),
519 		(ep.link_type == HCI_LINK_ACL ? "ACL" : "SCO"),
520 		ep.bdaddr.b[5], ep.bdaddr.b[4], ep.bdaddr.b[3],
521 		ep.bdaddr.b[2], ep.bdaddr.b[1], ep.bdaddr.b[0],
522 		ep.status);
523 
524 	link = hci_link_lookup_bdaddr(unit, &ep.bdaddr, ep.link_type);
525 
526 	if (ep.status) {
527 		if (link != NULL) {
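			/* map the HCI failure reason to an errno for the upper layers */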
528 			switch (ep.status) {
529 			case 0x04: /* "Page Timeout" */
530 				err = EHOSTDOWN;
531 				break;
532 
533 			case 0x08: /* "Connection Timed Out" */
534 				err = ETIMEDOUT;
535 				break;
536 
537 			case 0x16: /* "Connection Terminated by Local Host" */
538 				err = 0;
539 				break;
540 
541 			default:
542 				err = ECONNREFUSED;
543 				break;
544 			}
545 
546 			hci_link_free(link, err);
547 		}
548 
549 		return;
550 	}
551 
552 	if (link == NULL) {
553 		hci_discon_cp dp;
554 
555 		dp.con_handle = ep.con_handle;
556 		dp.reason = 0x13; /* "Remote User Terminated Connection" */
557 
558 		hci_send_cmd(unit, HCI_CMD_DISCONNECT, &dp, sizeof(dp));
559 		return;
560 	}
561 
562 	/* XXX could check auth_enable here */
563 
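	/*
	 * encryption negotiated at connection setup implies that the
	 * link has also been authenticated
	 */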
564 	if (ep.encryption_mode)
565 		link->hl_flags |= (HCI_LINK_AUTH | HCI_LINK_ENCRYPT);
566 
567 	link->hl_state = HCI_LINK_OPEN;
568 	link->hl_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));
569 
570 	if (ep.link_type == HCI_LINK_ACL) {
571 		cp.con_handle = ep.con_handle;
572 		cp.settings = htole16(unit->hci_link_policy);
573 		err = hci_send_cmd(unit, HCI_CMD_WRITE_LINK_POLICY_SETTINGS,
574 						&cp, sizeof(cp));
575 		if (err)
576 			aprint_error_dev(unit->hci_dev,
577 			    "Warning, could not write link policy\n");
578 
579 		err = hci_send_cmd(unit, HCI_CMD_READ_CLOCK_OFFSET,
580 				    &cp.con_handle, sizeof(cp.con_handle));
581 		if (err)
582 			aprint_error_dev(unit->hci_dev,
583 			    "Warning, could not read clock offset\n");
584 
585 		err = hci_acl_setmode(link);
586 		if (err == EINPROGRESS)
587 			return;
588 
589 		hci_acl_linkmode(link);
590 	} else {
591 		(*link->hl_sco->sp_proto->connected)(link->hl_sco->sp_upper);
592 	}
593 }
594 
595 /*
596  * Disconnection Complete
597  *
598  * This is sent in response to a disconnection request, but also if
599  * the remote device goes out of range.
600  */
601 static void
602 hci_event_discon_compl(struct hci_unit *unit, struct mbuf *m)
603 {
604 	hci_discon_compl_ep ep;
605 	struct hci_link *link;
606 
607 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
608 	m_copydata(m, 0, sizeof(ep), &ep);
609 	m_adj(m, sizeof(ep));
610 
611 	ep.con_handle = le16toh(ep.con_handle);
612 
613 	DPRINTFN(1, "handle #%d, status=0x%x\n", ep.con_handle, ep.status);
614 
615 	link = hci_link_lookup_handle(unit, HCI_CON_HANDLE(ep.con_handle));
616 	if (link)
617 		hci_link_free(link, ENOLINK);
618 }
619 
620 /*
621  * Connect Request
622  *
623  * We check upstream for appropriate listeners and accept connections
624  * that are wanted.
625  */
626 static void
627 hci_event_con_req(struct hci_unit *unit, struct mbuf *m)
628 {
629 	hci_con_req_ep ep;
630 	hci_accept_con_cp ap;
631 	hci_reject_con_cp rp;
632 	struct hci_link *link;
633 
634 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
635 	m_copydata(m, 0, sizeof(ep), &ep);
636 	m_adj(m, sizeof(ep));
637 
638 	DPRINTFN(1, "bdaddr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
639 		"class %2.2x%2.2x%2.2x type %s\n",
640 		ep.bdaddr.b[5], ep.bdaddr.b[4], ep.bdaddr.b[3],
641 		ep.bdaddr.b[2], ep.bdaddr.b[1], ep.bdaddr.b[0],
642 		ep.uclass[0], ep.uclass[1], ep.uclass[2],
643 		ep.link_type == HCI_LINK_ACL ? "ACL" : "SCO");
644 
645 	if (ep.link_type == HCI_LINK_ACL)
646 		link = hci_acl_newconn(unit, &ep.bdaddr);
647 	else
648 		link = hci_sco_newconn(unit, &ep.bdaddr);
649 
650 	if (link == NULL) {
651 		memset(&rp, 0, sizeof(rp));
652 		bdaddr_copy(&rp.bdaddr, &ep.bdaddr);
653 		rp.reason = 0x0f;	/* Unacceptable BD_ADDR */
654 
655 		hci_send_cmd(unit, HCI_CMD_REJECT_CON, &rp, sizeof(rp));
656 	} else {
657 		memset(&ap, 0, sizeof(ap));
658 		bdaddr_copy(&ap.bdaddr, &ep.bdaddr);
659 		if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
660 			ap.role = HCI_ROLE_MASTER;
661 		else
662 			ap.role = HCI_ROLE_SLAVE;
663 
664 		hci_send_cmd(unit, HCI_CMD_ACCEPT_CON, &ap, sizeof(ap));
665 	}
666 }
667 
668 /*
669  * Auth Complete
670  *
671  * Authentication has been completed on an ACL link. We can notify the
672  * upper layer protocols unless further mode changes are pending.
673  */
674 static void
675 hci_event_auth_compl(struct hci_unit *unit, struct mbuf *m)
676 {
677 	hci_auth_compl_ep ep;
678 	struct hci_link *link;
679 	int err;
680 
681 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
682 	m_copydata(m, 0, sizeof(ep), &ep);
683 	m_adj(m, sizeof(ep));
684 
685 	ep.con_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));
686 
687 	DPRINTFN(1, "handle #%d, status=0x%x\n", ep.con_handle, ep.status);
688 
689 	link = hci_link_lookup_handle(unit, ep.con_handle);
690 	if (link == NULL || link->hl_type != HCI_LINK_ACL)
691 		return;
692 
693 	if (ep.status == 0) {
694 		link->hl_flags |= HCI_LINK_AUTH;
695 
696 		if (link->hl_state == HCI_LINK_WAIT_AUTH)
697 			link->hl_state = HCI_LINK_OPEN;
698 
699 		err = hci_acl_setmode(link);
700 		if (err == EINPROGRESS)
701 			return;
702 	}
703 
704 	hci_acl_linkmode(link);
705 }
706 
707 /*
708  * Encryption Change
709  *
710  * The encryption status has changed. Basically, we note the change
711  * then notify the upper layer protocol unless further mode changes
712  * are pending.
713  * Note that if encryption gets disabled when it has been requested,
714  * we will attempt to enable it again (it's a feature, not a bug :)
715  */
716 static void
717 hci_event_encryption_change(struct hci_unit *unit, struct mbuf *m)
718 {
719 	hci_encryption_change_ep ep;
720 	struct hci_link *link;
721 	int err;
722 
723 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
724 	m_copydata(m, 0, sizeof(ep), &ep);
725 	m_adj(m, sizeof(ep));
726 
727 	ep.con_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));
728 
729 	DPRINTFN(1, "handle #%d, status=0x%x, encryption_enable=0x%x\n",
730 		 ep.con_handle, ep.status, ep.encryption_enable);
731 
732 	link = hci_link_lookup_handle(unit, ep.con_handle);
733 	if (link == NULL || link->hl_type != HCI_LINK_ACL)
734 		return;
735 
736 	if (ep.status == 0) {
737 		if (ep.encryption_enable == 0)
738 			link->hl_flags &= ~HCI_LINK_ENCRYPT;
739 		else
740 			link->hl_flags |= (HCI_LINK_AUTH | HCI_LINK_ENCRYPT);
741 
742 		if (link->hl_state == HCI_LINK_WAIT_ENCRYPT)
743 			link->hl_state = HCI_LINK_OPEN;
744 
745 		err = hci_acl_setmode(link);
746 		if (err == EINPROGRESS)
747 			return;
748 	}
749 
750 	hci_acl_linkmode(link);
751 }
752 
753 /*
754  * Change Connection Link Key Complete
755  *
756  * Link keys are handled in userland but if we are waiting to secure
757  * this link, we should notify the upper protocols. A SECURE request
758  * only needs a single key change, so we can cancel the request.
759  */
760 static void
761 hci_event_change_con_link_key_compl(struct hci_unit *unit, struct mbuf *m)
762 {
763 	hci_change_con_link_key_compl_ep ep;
764 	struct hci_link *link;
765 	int err;
766 
767 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
768 	m_copydata(m, 0, sizeof(ep), &ep);
769 	m_adj(m, sizeof(ep));
770 
771 	ep.con_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));
772 
773 	DPRINTFN(1, "handle #%d, status=0x%x\n", ep.con_handle, ep.status);
774 
775 	link = hci_link_lookup_handle(unit, ep.con_handle);
776 	if (link == NULL || link->hl_type != HCI_LINK_ACL)
777 		return;
778 
779 	link->hl_flags &= ~HCI_LINK_SECURE_REQ;
780 
781 	if (ep.status == 0) {
782 		link->hl_flags |= (HCI_LINK_AUTH | HCI_LINK_SECURE);
783 
784 		if (link->hl_state == HCI_LINK_WAIT_SECURE)
785 			link->hl_state = HCI_LINK_OPEN;
786 
787 		err = hci_acl_setmode(link);
788 		if (err == EINPROGRESS)
789 			return;
790 	}
791 
792 	hci_acl_linkmode(link);
793 }
794 
795 /*
796  * Read Clock Offset Complete
797  *
798  * We keep a note of the clock offset of remote devices when a
799  * link is made, in order to facilitate reconnections to the device.
800  */
801 static void
802 hci_event_read_clock_offset_compl(struct hci_unit *unit, struct mbuf *m)
803 {
804 	hci_read_clock_offset_compl_ep ep;
805 	struct hci_link *link;
806 
807 	KASSERT(m->m_pkthdr.len >= sizeof(ep));
808 	m_copydata(m, 0, sizeof(ep), &ep);
809 	m_adj(m, sizeof(ep));
810 
811 	DPRINTFN(1, "handle #%d, offset=%u, status=0x%x\n",
812 		le16toh(ep.con_handle), le16toh(ep.clock_offset), ep.status);
813 
814 	ep.con_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));
815 	link = hci_link_lookup_handle(unit, ep.con_handle);
816 
817 	if (ep.status != 0 || link == NULL)
818 		return;
819 
820 	link->hl_clock = ep.clock_offset;
821 }
822 
823 /*
824  * process results of read_bdaddr command_complete event
825  */
826 static void
827 hci_cmd_read_bdaddr(struct hci_unit *unit, struct mbuf *m)
828 {
829 	hci_read_bdaddr_rp rp;
830 
831 	KASSERT(m->m_pkthdr.len >= sizeof(rp));
832 	m_copydata(m, 0, sizeof(rp), &rp);
833 	m_adj(m, sizeof(rp));
834 
835 	if (rp.status > 0)
836 		return;
837 
838 	if ((unit->hci_flags & BTF_INIT_BDADDR) == 0)
839 		return;
840 
841 	bdaddr_copy(&unit->hci_bdaddr, &rp.bdaddr);
842 
843 	unit->hci_flags &= ~BTF_INIT_BDADDR;
844 
845 	wakeup(unit);
846 }
847 
848 /*
849  * process results of read_buffer_size command_complete event
850  */
851 static void
852 hci_cmd_read_buffer_size(struct hci_unit *unit, struct mbuf *m)
853 {
854 	hci_read_buffer_size_rp rp;
855 
856 	KASSERT(m->m_pkthdr.len >= sizeof(rp));
857 	m_copydata(m, 0, sizeof(rp), &rp);
858 	m_adj(m, sizeof(rp));
859 
860 	if (rp.status > 0)
861 		return;
862 
863 	if ((unit->hci_flags & BTF_INIT_BUFFER_SIZE) == 0)
864 		return;
865 
866 	unit->hci_max_acl_size = le16toh(rp.max_acl_size);
867 	unit->hci_num_acl_pkts = le16toh(rp.num_acl_pkts);
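	/* max_sco_size is a single octet, so no byte swap is needed */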
868 	unit->hci_max_sco_size = rp.max_sco_size;
869 	unit->hci_num_sco_pkts = le16toh(rp.num_sco_pkts);
870 
871 	unit->hci_flags &= ~BTF_INIT_BUFFER_SIZE;
872 
873 	wakeup(unit);
874 }
875 
876 /*
877  * process results of read_local_features command_complete event
878  */
879 static void
880 hci_cmd_read_local_features(struct hci_unit *unit, struct mbuf *m)
881 {
882 	hci_read_local_features_rp rp;
883 
884 	KASSERT(m->m_pkthdr.len >= sizeof(rp));
885 	m_copydata(m, 0, sizeof(rp), &rp);
886 	m_adj(m, sizeof(rp));
887 
888 	if (rp.status > 0)
889 		return;
890 
891 	if ((unit->hci_flags & BTF_INIT_FEATURES) == 0)
892 		return;
893 
894 	unit->hci_lmp_mask = 0;
895 
896 	if (rp.features[0] & HCI_LMP_ROLE_SWITCH)
897 		unit->hci_lmp_mask |= HCI_LINK_POLICY_ENABLE_ROLE_SWITCH;
898 
899 	if (rp.features[0] & HCI_LMP_HOLD_MODE)
900 		unit->hci_lmp_mask |= HCI_LINK_POLICY_ENABLE_HOLD_MODE;
901 
902 	if (rp.features[0] & HCI_LMP_SNIFF_MODE)
903 		unit->hci_lmp_mask |= HCI_LINK_POLICY_ENABLE_SNIFF_MODE;
904 
905 	if (rp.features[1] & HCI_LMP_PARK_MODE)
906 		unit->hci_lmp_mask |= HCI_LINK_POLICY_ENABLE_PARK_MODE;
907 
908 	/* ACL packet mask */
909 	unit->hci_acl_mask = HCI_PKT_DM1 | HCI_PKT_DH1;
910 
911 	if (rp.features[0] & HCI_LMP_3SLOT)
912 		unit->hci_acl_mask |= HCI_PKT_DM3 | HCI_PKT_DH3;
913 
914 	if (rp.features[0] & HCI_LMP_5SLOT)
915 		unit->hci_acl_mask |= HCI_PKT_DM5 | HCI_PKT_DH5;
916 
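	/*
	 * the EDR ACL packet type bits have an inverted sense (a set bit
	 * means the packet type shall not be used), so the rates and slot
	 * sizes that are not supported are marked here
	 */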
917 	if ((rp.features[3] & HCI_LMP_EDR_ACL_2MBPS) == 0)
918 		unit->hci_acl_mask |= HCI_PKT_2MBPS_DH1
919 				    | HCI_PKT_2MBPS_DH3
920 				    | HCI_PKT_2MBPS_DH5;
921 
922 	if ((rp.features[3] & HCI_LMP_EDR_ACL_3MBPS) == 0)
923 		unit->hci_acl_mask |= HCI_PKT_3MBPS_DH1
924 				    | HCI_PKT_3MBPS_DH3
925 				    | HCI_PKT_3MBPS_DH5;
926 
927 	if ((rp.features[4] & HCI_LMP_3SLOT_EDR_ACL) == 0)
928 		unit->hci_acl_mask |= HCI_PKT_2MBPS_DH3
929 				    | HCI_PKT_3MBPS_DH3;
930 
931 	if ((rp.features[5] & HCI_LMP_5SLOT_EDR_ACL) == 0)
932 		unit->hci_acl_mask |= HCI_PKT_2MBPS_DH5
933 				    | HCI_PKT_3MBPS_DH5;
934 
935 	unit->hci_packet_type = unit->hci_acl_mask;
936 
937 	/* SCO packet mask */
938 	unit->hci_sco_mask = 0;
939 	if (rp.features[1] & HCI_LMP_SCO_LINK)
940 		unit->hci_sco_mask |= HCI_PKT_HV1;
941 
942 	if (rp.features[1] & HCI_LMP_HV2_PKT)
943 		unit->hci_sco_mask |= HCI_PKT_HV2;
944 
945 	if (rp.features[1] & HCI_LMP_HV3_PKT)
946 		unit->hci_sco_mask |= HCI_PKT_HV3;
947 
948 	if (rp.features[3] & HCI_LMP_EV3_PKT)
949 		unit->hci_sco_mask |= HCI_PKT_EV3;
950 
951 	if (rp.features[4] & HCI_LMP_EV4_PKT)
952 		unit->hci_sco_mask |= HCI_PKT_EV4;
953 
954 	if (rp.features[4] & HCI_LMP_EV5_PKT)
955 		unit->hci_sco_mask |= HCI_PKT_EV5;
956 
957 	/* XXX what do 2MBPS/3MBPS/3SLOT eSCO mean? */
958 
959 	unit->hci_flags &= ~BTF_INIT_FEATURES;
960 
961 	wakeup(unit);
962 
963 	DPRINTFN(1, "%s: lmp_mask %4.4x, acl_mask %4.4x, sco_mask %4.4x\n",
964 		device_xname(unit->hci_dev), unit->hci_lmp_mask,
965 		unit->hci_acl_mask, unit->hci_sco_mask);
966 }
967 
968 /*
969  * process results of reset command_complete event
970  *
971  * This has killed all the connections, so close down anything we have left,
972  * and reinitialise the unit.
973  */
974 static void
975 hci_cmd_reset(struct hci_unit *unit, struct mbuf *m)
976 {
977 	hci_reset_rp rp;
978 	struct hci_link *link, *next;
979 	int acl;
980 
981 	KASSERT(m->m_pkthdr.len >= sizeof(rp));
982 	m_copydata(m, 0, sizeof(rp), &rp);
983 	m_adj(m, sizeof(rp));
984 
985 	if (rp.status != 0)
986 		return;
987 
988 	/*
989 	 * release SCO links first, since they may be holding
990 	 * an ACL link reference.
991 	 */
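	/* pass 0 frees the non-ACL (SCO) links, pass 1 frees the remainder */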
992 	for (acl = 0 ; acl < 2 ; acl++) {
993 		next = TAILQ_FIRST(&unit->hci_links);
994 		while ((link = next) != NULL) {
995 			next = TAILQ_NEXT(link, hl_next);
996 			if (acl || link->hl_type != HCI_LINK_ACL)
997 				hci_link_free(link, ECONNABORTED);
998 		}
999 	}
1000 
1001 	unit->hci_num_acl_pkts = 0;
1002 	unit->hci_num_sco_pkts = 0;
1003 
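	/*
	 * restart the initialisation sequence; the replies are handled
	 * by the command_complete processing above
	 */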
1004 	if (hci_send_cmd(unit, HCI_CMD_READ_BDADDR, NULL, 0))
1005 		return;
1006 
1007 	if (hci_send_cmd(unit, HCI_CMD_READ_BUFFER_SIZE, NULL, 0))
1008 		return;
1009 
1010 	if (hci_send_cmd(unit, HCI_CMD_READ_LOCAL_FEATURES, NULL, 0))
1011 		return;
1012 }
1013