/* $NetBSD: hci_event.c,v 1.15 2008/03/06 20:56:26 plunky Exp $ */

/*-
 * Copyright (c) 2005 Iain Hibbert.
 * Copyright (c) 2006 Itronix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Itronix Inc. may not be used to endorse
 *    or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hci_event.c,v 1.15 2008/03/06 20:56:26 plunky Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <netbt/bluetooth.h>
#include <netbt/hci.h>
#include <netbt/sco.h>

static void hci_event_inquiry_result(struct hci_unit *, struct mbuf *);
static void hci_event_rssi_result(struct hci_unit *, struct mbuf *);
static void hci_event_command_status(struct hci_unit *, struct mbuf *);
static void hci_event_command_compl(struct hci_unit *, struct mbuf *);
static void hci_event_con_compl(struct hci_unit *, struct mbuf *);
static void hci_event_discon_compl(struct hci_unit *, struct mbuf *);
static void hci_event_con_req(struct hci_unit *, struct mbuf *);
static void hci_event_num_compl_pkts(struct hci_unit *, struct mbuf *);
static void hci_event_auth_compl(struct hci_unit *, struct mbuf *);
static void hci_event_encryption_change(struct hci_unit *, struct mbuf *);
static void hci_event_change_con_link_key_compl(struct hci_unit *, struct mbuf *);
static void hci_event_read_clock_offset_compl(struct hci_unit *, struct mbuf *);
static void hci_cmd_read_bdaddr(struct hci_unit *, struct mbuf *);
static void hci_cmd_read_buffer_size(struct hci_unit *, struct mbuf *);
static void hci_cmd_read_local_features(struct hci_unit *, struct mbuf *);
static void hci_cmd_read_local_ver(struct hci_unit *, struct mbuf *);
static void hci_cmd_read_local_commands(struct hci_unit *, struct mbuf *);
static void hci_cmd_reset(struct hci_unit *, struct mbuf *);
static void hci_cmd_create_con(struct hci_unit *unit, uint8_t status);

#ifdef BLUETOOTH_DEBUG
int bluetooth_debug;

static const char *hci_eventnames[] = {
        /* 0x00 */ "NULL",
        /* 0x01 */ "INQUIRY COMPLETE",
        /* 0x02 */ "INQUIRY RESULT",
        /* 0x03 */ "CONN COMPLETE",
        /* 0x04 */ "CONN REQ",
        /* 0x05 */ "DISCONN COMPLETE",
        /* 0x06 */ "AUTH COMPLETE",
        /* 0x07 */ "REMOTE NAME REQ COMPLETE",
        /* 0x08 */ "ENCRYPTION CHANGE",
        /* 0x09 */ "CHANGE CONN LINK KEY COMPLETE",
        /* 0x0a */ "MASTER LINK KEY COMPLETE",
        /* 0x0b */ "READ REMOTE FEATURES COMPLETE",
        /* 0x0c */ "READ REMOTE VERSION INFO COMPLETE",
        /* 0x0d */ "QoS SETUP COMPLETE",
        /* 0x0e */ "COMMAND COMPLETE",
        /* 0x0f */ "COMMAND STATUS",
        /* 0x10 */ "HARDWARE ERROR",
        /* 0x11 */ "FLUSH OCCUR",
        /* 0x12 */ "ROLE CHANGE",
        /* 0x13 */ "NUM COMPLETED PACKETS",
        /* 0x14 */ "MODE CHANGE",
        /* 0x15 */ "RETURN LINK KEYS",
        /* 0x16 */ "PIN CODE REQ",
        /* 0x17 */ "LINK KEY REQ",
        /* 0x18 */ "LINK KEY NOTIFICATION",
        /* 0x19 */ "LOOPBACK COMMAND",
        /* 0x1a */ "DATA BUFFER OVERFLOW",
        /* 0x1b */ "MAX SLOT CHANGE",
        /* 0x1c */ "READ CLOCK OFFSET COMPLETE",
        /* 0x1d */ "CONN PKT TYPE CHANGED",
        /* 0x1e */ "QOS VIOLATION",
        /* 0x1f */ "PAGE SCAN MODE CHANGE",
        /* 0x20 */ "PAGE SCAN REP MODE CHANGE",
        /* 0x21 */ "FLOW SPECIFICATION COMPLETE",
        /* 0x22 */ "RSSI RESULT",
        /* 0x23 */ "READ REMOTE EXT FEATURES",
        /* 0x24 */ "UNKNOWN",
        /* 0x25 */ "UNKNOWN",
        /* 0x26 */ "UNKNOWN",
        /* 0x27 */ "UNKNOWN",
        /* 0x28 */ "UNKNOWN",
        /* 0x29 */ "UNKNOWN",
        /* 0x2a */ "UNKNOWN",
        /* 0x2b */ "UNKNOWN",
        /* 0x2c */ "SCO CON COMPLETE",
        /* 0x2d */ "SCO CON CHANGED",
        /* 0x2e */ "SNIFF SUBRATING",
        /* 0x2f */ "EXTENDED INQUIRY RESULT",
        /* 0x30 */ "ENCRYPTION KEY REFRESH",
        /* 0x31 */ "IO CAPABILITY REQUEST",
        /* 0x32 */ "IO CAPABILITY RESPONSE",
        /* 0x33 */ "USER CONFIRM REQUEST",
        /* 0x34 */ "USER PASSKEY REQUEST",
        /* 0x35 */ "REMOTE OOB DATA REQUEST",
        /* 0x36 */ "SIMPLE PAIRING COMPLETE",
        /* 0x37 */ "UNKNOWN",
        /* 0x38 */ "LINK SUPERVISION TIMEOUT CHANGED",
        /* 0x39 */ "ENHANCED FLUSH COMPLETE",
        /* 0x3a */ "UNKNOWN",
        /* 0x3b */ "USER PASSKEY NOTIFICATION",
        /* 0x3c */ "KEYPRESS NOTIFICATION",
        /* 0x3d */ "REMOTE HOST FEATURES NOTIFICATION",
};

static const char *
hci_eventstr(unsigned int event)
{

        if (event < __arraycount(hci_eventnames))
                return hci_eventnames[event];

        switch (event) {
        case HCI_EVENT_BT_LOGO:         /* 0xfe */
                return "BT_LOGO";

        case HCI_EVENT_VENDOR:          /* 0xff */
                return "VENDOR";
        }

        return "UNKNOWN";
}
#endif  /* BLUETOOTH_DEBUG */

/*
 * process HCI Events
 *
 * We will free the mbuf at the end; no need for any sub-functions
 * to handle that. We kind of assume that the device sends us
 * valid events.
 */
void
hci_event(struct mbuf *m, struct hci_unit *unit)
{
        hci_event_hdr_t hdr;

        KASSERT(m->m_flags & M_PKTHDR);

        KASSERT(m->m_pkthdr.len >= sizeof(hdr));
        m_copydata(m, 0, sizeof(hdr), &hdr);
        m_adj(m, sizeof(hdr));

        KASSERT(hdr.type == HCI_EVENT_PKT);

        DPRINTFN(1, "(%s) event %s\n",
            device_xname(unit->hci_dev), hci_eventstr(hdr.event));

        switch(hdr.event) {
        case HCI_EVENT_COMMAND_STATUS:
                hci_event_command_status(unit, m);
                break;

        case HCI_EVENT_COMMAND_COMPL:
                hci_event_command_compl(unit, m);
                break;

        case HCI_EVENT_NUM_COMPL_PKTS:
                hci_event_num_compl_pkts(unit, m);
                break;

        case HCI_EVENT_INQUIRY_RESULT:
                hci_event_inquiry_result(unit, m);
                break;

        case HCI_EVENT_RSSI_RESULT:
                hci_event_rssi_result(unit, m);
                break;

        case HCI_EVENT_CON_COMPL:
                hci_event_con_compl(unit, m);
                break;

        case HCI_EVENT_DISCON_COMPL:
                hci_event_discon_compl(unit, m);
                break;

        case HCI_EVENT_CON_REQ:
                hci_event_con_req(unit, m);
                break;

        case HCI_EVENT_AUTH_COMPL:
                hci_event_auth_compl(unit, m);
                break;

        case HCI_EVENT_ENCRYPTION_CHANGE:
                hci_event_encryption_change(unit, m);
                break;

        case HCI_EVENT_CHANGE_CON_LINK_KEY_COMPL:
                hci_event_change_con_link_key_compl(unit, m);
                break;

        case HCI_EVENT_READ_CLOCK_OFFSET_COMPL:
                hci_event_read_clock_offset_compl(unit, m);
                break;

        default:
                break;
        }

        m_freem(m);
}

/*
 * Command Status
 *
 * Update our record of num_cmd_pkts then post-process any pending commands
 * and optionally restart cmd output on the unit.
 */
static void
hci_event_command_status(struct hci_unit *unit, struct mbuf *m)
{
        hci_command_status_ep ep;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        ep.opcode = le16toh(ep.opcode);

        DPRINTFN(1, "(%s) opcode (%03x|%04x) status = 0x%x num_cmd_pkts = %d\n",
            device_xname(unit->hci_dev),
            HCI_OGF(ep.opcode), HCI_OCF(ep.opcode),
            ep.status,
            ep.num_cmd_pkts);

        unit->hci_num_cmd_pkts = ep.num_cmd_pkts;

        /*
         * post processing of pending commands
         */
        switch(ep.opcode) {
        case HCI_CMD_CREATE_CON:
                hci_cmd_create_con(unit, ep.status);
                break;

        default:
                if (ep.status == 0)
                        break;

                aprint_error_dev(unit->hci_dev,
                    "CommandStatus opcode (%03x|%04x) failed (status=0x%02x)\n",
                    HCI_OGF(ep.opcode), HCI_OCF(ep.opcode),
                    ep.status);

                break;
        }

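        /* the controller has free command slots again; drain the wait queue */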
        while (unit->hci_num_cmd_pkts > 0 && MBUFQ_FIRST(&unit->hci_cmdwait)) {
                MBUFQ_DEQUEUE(&unit->hci_cmdwait, m);
                hci_output_cmd(unit, m);
        }
}

/*
 * Command Complete
 *
 * Update our record of num_cmd_pkts then handle the completed command,
 * and optionally restart cmd output on the unit.
 */
static void
hci_event_command_compl(struct hci_unit *unit, struct mbuf *m)
{
        hci_command_compl_ep ep;
        hci_status_rp rp;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        DPRINTFN(1, "(%s) opcode (%03x|%04x) num_cmd_pkts = %d\n",
            device_xname(unit->hci_dev),
            HCI_OGF(le16toh(ep.opcode)), HCI_OCF(le16toh(ep.opcode)),
            ep.num_cmd_pkts);

        /*
         * I am not sure if this is completely correct; it is not guaranteed
         * that a command_complete packet will contain the status, though
         * most do seem to.
         */
        m_copydata(m, 0, sizeof(rp), &rp);
        if (rp.status > 0)
                aprint_error_dev(unit->hci_dev,
                    "CommandComplete opcode (%03x|%04x) failed (status=0x%02x)\n",
                    HCI_OGF(le16toh(ep.opcode)), HCI_OCF(le16toh(ep.opcode)),
                    rp.status);

        unit->hci_num_cmd_pkts = ep.num_cmd_pkts;

        /*
         * post processing of completed commands
         */
        switch(le16toh(ep.opcode)) {
        case HCI_CMD_READ_BDADDR:
                hci_cmd_read_bdaddr(unit, m);
                break;

        case HCI_CMD_READ_BUFFER_SIZE:
                hci_cmd_read_buffer_size(unit, m);
                break;

        case HCI_CMD_READ_LOCAL_FEATURES:
                hci_cmd_read_local_features(unit, m);
                break;

        case HCI_CMD_READ_LOCAL_VER:
                hci_cmd_read_local_ver(unit, m);
                break;

        case HCI_CMD_READ_LOCAL_COMMANDS:
                hci_cmd_read_local_commands(unit, m);
                break;

        case HCI_CMD_RESET:
                hci_cmd_reset(unit, m);
                break;

        default:
                break;
        }

        while (unit->hci_num_cmd_pkts > 0 && MBUFQ_FIRST(&unit->hci_cmdwait)) {
                MBUFQ_DEQUEUE(&unit->hci_cmdwait, m);
                hci_output_cmd(unit, m);
        }
}

/*
 * Number of Completed Packets
 *
 * This is sent periodically by the Controller telling us how many
 * buffers are now freed up and which handle was using them. From
 * this we determine which type of buffer it was and add the qty
 * back into the relevant packet counter, then restart output on
 * links that have halted.
 */
static void
hci_event_num_compl_pkts(struct hci_unit *unit, struct mbuf *m)
{
        hci_num_compl_pkts_ep ep;
        struct hci_link *link, *next;
        uint16_t handle, num;
        int num_acl = 0, num_sco = 0;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        while (ep.num_con_handles--) {
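                /* each entry is a (connection handle, num packets) pair, little endian */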
                m_copydata(m, 0, sizeof(handle), &handle);
                m_adj(m, sizeof(handle));
                handle = le16toh(handle);

                m_copydata(m, 0, sizeof(num), &num);
                m_adj(m, sizeof(num));
                num = le16toh(num);

                link = hci_link_lookup_handle(unit, handle);
                if (link) {
                        if (link->hl_type == HCI_LINK_ACL) {
                                num_acl += num;
                                hci_acl_complete(link, num);
                        } else {
                                num_sco += num;
                                hci_sco_complete(link, num);
                        }
                } else {
                        /* XXX need to issue Read_Buffer_Size or Reset? */
                        aprint_error_dev(unit->hci_dev,
                            "unknown handle %d! (losing track of %d packet buffer%s)\n",
                            handle, num, (num == 1 ? "" : "s"));
                }
        }

        /*
         * Move up any queued packets. When a link has sent data, it will
         * move to the back of the queue; technically then, if a link had
         * something to send and there were still buffers available, it
         * could get started twice, but it seemed more important to handle
         * higher loads fairly than to worry about wasting cycles when we
         * are not busy.
         */

        unit->hci_num_acl_pkts += num_acl;
        unit->hci_num_sco_pkts += num_sco;

        link = TAILQ_FIRST(&unit->hci_links);
        while (link && (unit->hci_num_acl_pkts > 0 || unit->hci_num_sco_pkts > 0)) {
                next = TAILQ_NEXT(link, hl_next);

                if (link->hl_type == HCI_LINK_ACL) {
                        if (unit->hci_num_acl_pkts > 0 && link->hl_txqlen > 0)
                                hci_acl_start(link);
                } else {
                        if (unit->hci_num_sco_pkts > 0 && link->hl_txqlen > 0)
                                hci_sco_start(link);
                }

                link = next;
        }
}

/*
 * Inquiry Result
 *
 * keep a note of devices seen, so we know which unit to use
 * on outgoing connections
 */
static void
hci_event_inquiry_result(struct hci_unit *unit, struct mbuf *m)
{
        hci_inquiry_result_ep ep;
        hci_inquiry_response ir;
        struct hci_memo *memo;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        DPRINTFN(1, "%d response%s\n", ep.num_responses,
            (ep.num_responses == 1 ? "" : "s"));

        while(ep.num_responses--) {
                KASSERT(m->m_pkthdr.len >= sizeof(ir));
                m_copydata(m, 0, sizeof(ir), &ir);
                m_adj(m, sizeof(ir));

                DPRINTFN(1, "bdaddr %02x:%02x:%02x:%02x:%02x:%02x\n",
                    ir.bdaddr.b[5], ir.bdaddr.b[4], ir.bdaddr.b[3],
                    ir.bdaddr.b[2], ir.bdaddr.b[1], ir.bdaddr.b[0]);

                memo = hci_memo_new(unit, &ir.bdaddr);
                if (memo != NULL) {
                        memo->page_scan_rep_mode = ir.page_scan_rep_mode;
                        memo->page_scan_mode = ir.page_scan_mode;
                        memo->clock_offset = ir.clock_offset;
                }
        }
}

/*
 * Inquiry Result with RSSI
 *
 * as above but different packet when RSSI result is enabled
 */
static void
hci_event_rssi_result(struct hci_unit *unit, struct mbuf *m)
{
        hci_rssi_result_ep ep;
        hci_rssi_response rr;
        struct hci_memo *memo;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        DPRINTFN(1, "%d response%s\n", ep.num_responses,
            (ep.num_responses == 1 ? "" : "s"));

        while(ep.num_responses--) {
                KASSERT(m->m_pkthdr.len >= sizeof(rr));
                m_copydata(m, 0, sizeof(rr), &rr);
                m_adj(m, sizeof(rr));

                DPRINTFN(1, "bdaddr %02x:%02x:%02x:%02x:%02x:%02x\n",
                    rr.bdaddr.b[5], rr.bdaddr.b[4], rr.bdaddr.b[3],
                    rr.bdaddr.b[2], rr.bdaddr.b[1], rr.bdaddr.b[0]);

                memo = hci_memo_new(unit, &rr.bdaddr);
                if (memo != NULL) {
                        memo->page_scan_rep_mode = rr.page_scan_rep_mode;
                        memo->page_scan_mode = 0;
                        memo->clock_offset = rr.clock_offset;
                }
        }
}

/*
 * Connection Complete
 *
 * Sent to us when a connection is made. If there is no link
 * structure already allocated for this, we must have changed
 * our mind, so just disconnect.
 */
static void
hci_event_con_compl(struct hci_unit *unit, struct mbuf *m)
{
        hci_con_compl_ep ep;
        hci_write_link_policy_settings_cp cp;
        struct hci_link *link;
        int err;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        DPRINTFN(1, "(%s) %s connection complete for "
            "%02x:%02x:%02x:%02x:%02x:%02x status %#x\n",
            device_xname(unit->hci_dev),
            (ep.link_type == HCI_LINK_ACL ? "ACL" : "SCO"),
            ep.bdaddr.b[5], ep.bdaddr.b[4], ep.bdaddr.b[3],
            ep.bdaddr.b[2], ep.bdaddr.b[1], ep.bdaddr.b[0],
            ep.status);

        link = hci_link_lookup_bdaddr(unit, &ep.bdaddr, ep.link_type);

        if (ep.status) {
                if (link != NULL) {
                        switch (ep.status) {
                        case 0x04: /* "Page Timeout" */
                                err = EHOSTDOWN;
                                break;

                        case 0x08: /* "Connection Timed Out" */
                                err = ETIMEDOUT;
                                break;

                        case 0x16: /* "Connection Terminated by Local Host" */
                                err = 0;
                                break;

                        default:
                                err = ECONNREFUSED;
                                break;
                        }

                        hci_link_free(link, err);
                }

                return;
        }

        if (link == NULL) {
                hci_discon_cp dp;

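                /* ep.con_handle is still in wire (little endian) order, as the command expects */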
                dp.con_handle = ep.con_handle;
                dp.reason = 0x13; /* "Remote User Terminated Connection" */

                hci_send_cmd(unit, HCI_CMD_DISCONNECT, &dp, sizeof(dp));
                return;
        }

        /* XXX could check auth_enable here */

        if (ep.encryption_mode)
                link->hl_flags |= (HCI_LINK_AUTH | HCI_LINK_ENCRYPT);

        link->hl_state = HCI_LINK_OPEN;
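        /* keep only the 12 bit connection handle; the upper bits are reserved */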
        link->hl_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));

        if (ep.link_type == HCI_LINK_ACL) {
                cp.con_handle = ep.con_handle;
                cp.settings = htole16(unit->hci_link_policy);
                err = hci_send_cmd(unit, HCI_CMD_WRITE_LINK_POLICY_SETTINGS,
                    &cp, sizeof(cp));
                if (err)
                        aprint_error_dev(unit->hci_dev,
                            "Warning, could not write link policy\n");

                err = hci_send_cmd(unit, HCI_CMD_READ_CLOCK_OFFSET,
                    &cp.con_handle, sizeof(cp.con_handle));
                if (err)
                        aprint_error_dev(unit->hci_dev,
                            "Warning, could not read clock offset\n");

                err = hci_acl_setmode(link);
                if (err == EINPROGRESS)
                        return;

                hci_acl_linkmode(link);
        } else {
                (*link->hl_sco->sp_proto->connected)(link->hl_sco->sp_upper);
        }
}

/*
 * Disconnection Complete
 *
 * This is sent in response to a disconnection request, but also if
 * the remote device goes out of range.
 */
static void
hci_event_discon_compl(struct hci_unit *unit, struct mbuf *m)
{
        hci_discon_compl_ep ep;
        struct hci_link *link;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        ep.con_handle = le16toh(ep.con_handle);

        DPRINTFN(1, "handle #%d, status=0x%x\n", ep.con_handle, ep.status);

        link = hci_link_lookup_handle(unit, HCI_CON_HANDLE(ep.con_handle));
        if (link)
                hci_link_free(link, ENOLINK);
}

/*
 * Connect Request
 *
 * We check upstream for appropriate listeners and accept connections
 * that are wanted.
 */
static void
hci_event_con_req(struct hci_unit *unit, struct mbuf *m)
{
        hci_con_req_ep ep;
        hci_accept_con_cp ap;
        hci_reject_con_cp rp;
        struct hci_link *link;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        DPRINTFN(1, "bdaddr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
            "class %2.2x%2.2x%2.2x type %s\n",
            ep.bdaddr.b[5], ep.bdaddr.b[4], ep.bdaddr.b[3],
            ep.bdaddr.b[2], ep.bdaddr.b[1], ep.bdaddr.b[0],
            ep.uclass[0], ep.uclass[1], ep.uclass[2],
            ep.link_type == HCI_LINK_ACL ? "ACL" : "SCO");

        if (ep.link_type == HCI_LINK_ACL)
                link = hci_acl_newconn(unit, &ep.bdaddr);
        else
                link = hci_sco_newconn(unit, &ep.bdaddr);

        if (link == NULL) {
                memset(&rp, 0, sizeof(rp));
                bdaddr_copy(&rp.bdaddr, &ep.bdaddr);
                rp.reason = 0x0f; /* Unacceptable BD_ADDR */

                hci_send_cmd(unit, HCI_CMD_REJECT_CON, &rp, sizeof(rp));
        } else {
                memset(&ap, 0, sizeof(ap));
                bdaddr_copy(&ap.bdaddr, &ep.bdaddr);
                if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
                        ap.role = HCI_ROLE_MASTER;
                else
                        ap.role = HCI_ROLE_SLAVE;

                hci_send_cmd(unit, HCI_CMD_ACCEPT_CON, &ap, sizeof(ap));
        }
}

/*
 * Auth Complete
 *
 * Authentication has been completed on an ACL link. We can notify the
 * upper layer protocols unless further mode changes are pending.
 */
static void
hci_event_auth_compl(struct hci_unit *unit, struct mbuf *m)
{
        hci_auth_compl_ep ep;
        struct hci_link *link;
        int err;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        ep.con_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));

        DPRINTFN(1, "handle #%d, status=0x%x\n", ep.con_handle, ep.status);

        link = hci_link_lookup_handle(unit, ep.con_handle);
        if (link == NULL || link->hl_type != HCI_LINK_ACL)
                return;

        if (ep.status == 0) {
                link->hl_flags |= HCI_LINK_AUTH;

                if (link->hl_state == HCI_LINK_WAIT_AUTH)
                        link->hl_state = HCI_LINK_OPEN;

                err = hci_acl_setmode(link);
                if (err == EINPROGRESS)
                        return;
        }

        hci_acl_linkmode(link);
}

/*
 * Encryption Change
 *
 * The encryption status has changed. Basically, we note the change,
 * then notify the upper layer protocol unless further mode changes
 * are pending.
 * Note that if encryption gets disabled when it has been requested,
 * we will attempt to enable it again... (it's a feature, not a bug :)
 */
static void
hci_event_encryption_change(struct hci_unit *unit, struct mbuf *m)
{
        hci_encryption_change_ep ep;
        struct hci_link *link;
        int err;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        ep.con_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));

        DPRINTFN(1, "handle #%d, status=0x%x, encryption_enable=0x%x\n",
            ep.con_handle, ep.status, ep.encryption_enable);

        link = hci_link_lookup_handle(unit, ep.con_handle);
        if (link == NULL || link->hl_type != HCI_LINK_ACL)
                return;

        if (ep.status == 0) {
                if (ep.encryption_enable == 0)
                        link->hl_flags &= ~HCI_LINK_ENCRYPT;
                else
                        link->hl_flags |= (HCI_LINK_AUTH | HCI_LINK_ENCRYPT);

                if (link->hl_state == HCI_LINK_WAIT_ENCRYPT)
                        link->hl_state = HCI_LINK_OPEN;

                err = hci_acl_setmode(link);
                if (err == EINPROGRESS)
                        return;
        }

        hci_acl_linkmode(link);
}

/*
 * Change Connection Link Key Complete
 *
 * Link keys are handled in userland but if we are waiting to secure
 * this link, we should notify the upper protocols. A SECURE request
 * only needs a single key change, so we can cancel the request.
 */
static void
hci_event_change_con_link_key_compl(struct hci_unit *unit, struct mbuf *m)
{
        hci_change_con_link_key_compl_ep ep;
        struct hci_link *link;
        int err;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        ep.con_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));

        DPRINTFN(1, "handle #%d, status=0x%x\n", ep.con_handle, ep.status);

        link = hci_link_lookup_handle(unit, ep.con_handle);
        if (link == NULL || link->hl_type != HCI_LINK_ACL)
                return;

        link->hl_flags &= ~HCI_LINK_SECURE_REQ;

        if (ep.status == 0) {
                link->hl_flags |= (HCI_LINK_AUTH | HCI_LINK_SECURE);

                if (link->hl_state == HCI_LINK_WAIT_SECURE)
                        link->hl_state = HCI_LINK_OPEN;

                err = hci_acl_setmode(link);
                if (err == EINPROGRESS)
                        return;
        }

        hci_acl_linkmode(link);
}

/*
 * Read Clock Offset Complete
 *
 * We keep a note of the clock offset of remote devices when a
 * link is made, in order to facilitate reconnections to the device
 */
static void
hci_event_read_clock_offset_compl(struct hci_unit *unit, struct mbuf *m)
{
        hci_read_clock_offset_compl_ep ep;
        struct hci_link *link;

        KASSERT(m->m_pkthdr.len >= sizeof(ep));
        m_copydata(m, 0, sizeof(ep), &ep);
        m_adj(m, sizeof(ep));

        DPRINTFN(1, "handle #%d, offset=%u, status=0x%x\n",
            le16toh(ep.con_handle), le16toh(ep.clock_offset), ep.status);

        ep.con_handle = HCI_CON_HANDLE(le16toh(ep.con_handle));
        link = hci_link_lookup_handle(unit, ep.con_handle);

        if (ep.status != 0 || link == NULL)
                return;

        link->hl_clock = ep.clock_offset;
}

/*
 * process results of read_bdaddr command_complete event
 */
static void
hci_cmd_read_bdaddr(struct hci_unit *unit, struct mbuf *m)
{
        hci_read_bdaddr_rp rp;

        KASSERT(m->m_pkthdr.len >= sizeof(rp));
        m_copydata(m, 0, sizeof(rp), &rp);
        m_adj(m, sizeof(rp));

        if (rp.status > 0)
                return;

        if ((unit->hci_flags & BTF_INIT_BDADDR) == 0)
                return;

        bdaddr_copy(&unit->hci_bdaddr, &rp.bdaddr);

        unit->hci_flags &= ~BTF_INIT_BDADDR;

        wakeup(unit);
}

/*
 * process results of read_buffer_size command_complete event
 */
static void
hci_cmd_read_buffer_size(struct hci_unit *unit, struct mbuf *m)
{
        hci_read_buffer_size_rp rp;

        KASSERT(m->m_pkthdr.len >= sizeof(rp));
        m_copydata(m, 0, sizeof(rp), &rp);
        m_adj(m, sizeof(rp));

        if (rp.status > 0)
                return;

        if ((unit->hci_flags & BTF_INIT_BUFFER_SIZE) == 0)
                return;

        unit->hci_max_acl_size = le16toh(rp.max_acl_size);
        unit->hci_num_acl_pkts = le16toh(rp.num_acl_pkts);
        unit->hci_max_sco_size = rp.max_sco_size;
        unit->hci_num_sco_pkts = le16toh(rp.num_sco_pkts);

        unit->hci_flags &= ~BTF_INIT_BUFFER_SIZE;

        wakeup(unit);
}

/*
 * process results of read_local_features command_complete event
 */
static void
hci_cmd_read_local_features(struct hci_unit *unit, struct mbuf *m)
{
        hci_read_local_features_rp rp;

        KASSERT(m->m_pkthdr.len >= sizeof(rp));
        m_copydata(m, 0, sizeof(rp), &rp);
        m_adj(m, sizeof(rp));

        if (rp.status > 0)
                return;

        if ((unit->hci_flags & BTF_INIT_FEATURES) == 0)
                return;

        unit->hci_lmp_mask = 0;

        if (rp.features[0] & HCI_LMP_ROLE_SWITCH)
                unit->hci_lmp_mask |= HCI_LINK_POLICY_ENABLE_ROLE_SWITCH;

        if (rp.features[0] & HCI_LMP_HOLD_MODE)
                unit->hci_lmp_mask |= HCI_LINK_POLICY_ENABLE_HOLD_MODE;

        if (rp.features[0] & HCI_LMP_SNIFF_MODE)
                unit->hci_lmp_mask |= HCI_LINK_POLICY_ENABLE_SNIFF_MODE;

        if (rp.features[1] & HCI_LMP_PARK_MODE)
                unit->hci_lmp_mask |= HCI_LINK_POLICY_ENABLE_PARK_MODE;

        /* ACL packet mask */
        unit->hci_acl_mask = HCI_PKT_DM1 | HCI_PKT_DH1;

        if (rp.features[0] & HCI_LMP_3SLOT)
                unit->hci_acl_mask |= HCI_PKT_DM3 | HCI_PKT_DH3;

        if (rp.features[0] & HCI_LMP_5SLOT)
                unit->hci_acl_mask |= HCI_PKT_DM5 | HCI_PKT_DH5;

        if ((rp.features[3] & HCI_LMP_EDR_ACL_2MBPS) == 0)
                unit->hci_acl_mask |= HCI_PKT_2MBPS_DH1
                    | HCI_PKT_2MBPS_DH3
                    | HCI_PKT_2MBPS_DH5;

        if ((rp.features[3] & HCI_LMP_EDR_ACL_3MBPS) == 0)
                unit->hci_acl_mask |= HCI_PKT_3MBPS_DH1
                    | HCI_PKT_3MBPS_DH3
                    | HCI_PKT_3MBPS_DH5;

        if ((rp.features[4] & HCI_LMP_3SLOT_EDR_ACL) == 0)
                unit->hci_acl_mask |= HCI_PKT_2MBPS_DH3
                    | HCI_PKT_3MBPS_DH3;

        if ((rp.features[5] & HCI_LMP_5SLOT_EDR_ACL) == 0)
                unit->hci_acl_mask |= HCI_PKT_2MBPS_DH5
                    | HCI_PKT_3MBPS_DH5;

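        /* use the full ACL capability as the default packet type mask */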
        unit->hci_packet_type = unit->hci_acl_mask;

        /* SCO packet mask */
        unit->hci_sco_mask = 0;
        if (rp.features[1] & HCI_LMP_SCO_LINK)
                unit->hci_sco_mask |= HCI_PKT_HV1;

        if (rp.features[1] & HCI_LMP_HV2_PKT)
                unit->hci_sco_mask |= HCI_PKT_HV2;

        if (rp.features[1] & HCI_LMP_HV3_PKT)
                unit->hci_sco_mask |= HCI_PKT_HV3;

        if (rp.features[3] & HCI_LMP_EV3_PKT)
                unit->hci_sco_mask |= HCI_PKT_EV3;

        if (rp.features[4] & HCI_LMP_EV4_PKT)
                unit->hci_sco_mask |= HCI_PKT_EV4;

        if (rp.features[4] & HCI_LMP_EV5_PKT)
                unit->hci_sco_mask |= HCI_PKT_EV5;

        /* XXX what do 2MBPS/3MBPS/3SLOT eSCO mean? */

        unit->hci_flags &= ~BTF_INIT_FEATURES;

        wakeup(unit);

        DPRINTFN(1, "%s: lmp_mask %4.4x, acl_mask %4.4x, sco_mask %4.4x\n",
            device_xname(unit->hci_dev), unit->hci_lmp_mask,
            unit->hci_acl_mask, unit->hci_sco_mask);
}

/*
 * process results of read_local_ver command_complete event
 *
 * reading local supported commands is only supported from 1.2 spec
 */
static void
hci_cmd_read_local_ver(struct hci_unit *unit, struct mbuf *m)
{
        hci_read_local_ver_rp rp;

        KASSERT(m->m_pkthdr.len >= sizeof(rp));
        m_copydata(m, 0, sizeof(rp), &rp);
        m_adj(m, sizeof(rp));

        if (rp.status != 0)
                return;

        if ((unit->hci_flags & BTF_INIT_COMMANDS) == 0)
                return;

        if (rp.hci_version < HCI_SPEC_V12) {
                unit->hci_flags &= ~BTF_INIT_COMMANDS;
                wakeup(unit);
                return;
        }

        hci_send_cmd(unit, HCI_CMD_READ_LOCAL_COMMANDS, NULL, 0);
}

/*
 * process results of read_local_commands command_complete event
 */
static void
hci_cmd_read_local_commands(struct hci_unit *unit, struct mbuf *m)
{
        hci_read_local_commands_rp rp;

        KASSERT(m->m_pkthdr.len >= sizeof(rp));
        m_copydata(m, 0, sizeof(rp), &rp);
        m_adj(m, sizeof(rp));

        if (rp.status != 0)
                return;

        if ((unit->hci_flags & BTF_INIT_COMMANDS) == 0)
                return;

        unit->hci_flags &= ~BTF_INIT_COMMANDS;
        memcpy(unit->hci_cmds, rp.commands, HCI_COMMANDS_SIZE);

        wakeup(unit);
}

/*
 * process results of reset command_complete event
 *
 * This has killed all the connections, so close down anything we have left,
 * and reinitialise the unit.
 */
static void
hci_cmd_reset(struct hci_unit *unit, struct mbuf *m)
{
        hci_reset_rp rp;
        struct hci_link *link, *next;
        int acl;

        KASSERT(m->m_pkthdr.len >= sizeof(rp));
        m_copydata(m, 0, sizeof(rp), &rp);
        m_adj(m, sizeof(rp));

        if (rp.status != 0)
                return;

        /*
         * release SCO links first, since they may be holding
         * an ACL link reference.
         */
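        /* first pass (acl == 0) frees only non-ACL links; second pass frees the rest */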
        for (acl = 0 ; acl < 2 ; acl++) {
                next = TAILQ_FIRST(&unit->hci_links);
                while ((link = next) != NULL) {
                        next = TAILQ_NEXT(link, hl_next);
                        if (acl || link->hl_type != HCI_LINK_ACL)
                                hci_link_free(link, ECONNABORTED);
                }
        }

        unit->hci_num_acl_pkts = 0;
        unit->hci_num_sco_pkts = 0;

        if (hci_send_cmd(unit, HCI_CMD_READ_BDADDR, NULL, 0))
                return;

        if (hci_send_cmd(unit, HCI_CMD_READ_BUFFER_SIZE, NULL, 0))
                return;

        if (hci_send_cmd(unit, HCI_CMD_READ_LOCAL_FEATURES, NULL, 0))
                return;

        if (hci_send_cmd(unit, HCI_CMD_READ_LOCAL_VER, NULL, 0))
                return;
}

/*
 * process command_status event for create_con command
 *
 * A "Create Connection" command can sometimes fail to start for whatever
 * reason, and the command_status event returns failure, but we get no
 * indication of which connection failed (for instance in the case where
 * we tried to open too many connections all at once). So, we keep a flag
 * on the link to indicate pending status until the command_status event
 * is returned, to help us decide which needs to be failed.
 *
 * This code does assume that there will be only one command pending, but
 * I can't see this being a problem since I am unaware of any device that
 * pipelines more than a single command.
 */
static void
hci_cmd_create_con(struct hci_unit *unit, uint8_t status)
{
        struct hci_link *link;

        TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
                if ((link->hl_flags & HCI_LINK_CREATE_CON) == 0)
                        continue;

                link->hl_flags &= ~HCI_LINK_CREATE_CON;

                switch(status) {
                case 0x00:      /* success */
                        break;

                case 0x0c:      /* "Command Disallowed" */
                        hci_link_free(link, EBUSY);
                        break;

                default:        /* some other trouble */
                        hci_link_free(link, EPROTO);
                        break;
                }

                return;
        }
}