/*	$NetBSD: hci_link.c,v 1.20.2.1 2010/03/11 15:04:28 yamt Exp $	*/

/*-
 * Copyright (c) 2005 Iain Hibbert.
 * Copyright (c) 2006 Itronix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Itronix Inc. may not be used to endorse
 *    or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hci_link.c,v 1.20.2.1 2010/03/11 15:04:28 yamt Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>

#include <netbt/bluetooth.h>
#include <netbt/hci.h>
#include <netbt/l2cap.h>
#include <netbt/sco.h>

/*******************************************************************************
 *
 *	HCI ACL Connections
 */

/*
 * Automatically expire unused ACL connections after this number of
 * seconds (if zero, do not expire unused connections) [sysctl]
 */
int hci_acl_expiry = 10;	/* seconds */

/*
 * hci_acl_open(unit, bdaddr)
 *
 * open ACL connection to remote bdaddr. Only one ACL connection is permitted
 * between any two Bluetooth devices, so we look for an existing one before
 * trying to start a new one.
 */
struct hci_link *
hci_acl_open(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct hci_link *link;
	struct hci_memo *memo;
	hci_create_con_cp cp;
	int err;

	KASSERT(unit != NULL);
	KASSERT(bdaddr != NULL);

	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (link == NULL) {
		link = hci_link_alloc(unit, bdaddr, HCI_LINK_ACL);
		if (link == NULL)
			return NULL;
	}

	switch (link->hl_state) {
	case HCI_LINK_CLOSED:
		/*
		 * open connection to remote device
		 */
		memset(&cp, 0, sizeof(cp));
		bdaddr_copy(&cp.bdaddr, bdaddr);
		cp.pkt_type = htole16(unit->hci_packet_type);

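		/*
		 * Use any cached inquiry result (page scan settings and
		 * clock offset) to speed up connection setup.
		 */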
		memo = hci_memo_find(unit, bdaddr);
		if (memo != NULL) {
			cp.page_scan_rep_mode = memo->page_scan_rep_mode;
			cp.page_scan_mode = memo->page_scan_mode;
			cp.clock_offset = memo->clock_offset;
		}

		if (unit->hci_link_policy & HCI_LINK_POLICY_ENABLE_ROLE_SWITCH)
			cp.accept_role_switch = 1;

		err = hci_send_cmd(unit, HCI_CMD_CREATE_CON, &cp, sizeof(cp));
		if (err) {
			hci_link_free(link, err);
			return NULL;
		}

		link->hl_flags |= HCI_LINK_CREATE_CON;
		link->hl_state = HCI_LINK_WAIT_CONNECT;
		break;

	case HCI_LINK_WAIT_CONNECT:
	case HCI_LINK_WAIT_AUTH:
	case HCI_LINK_WAIT_ENCRYPT:
	case HCI_LINK_WAIT_SECURE:
		/*
		 * somebody else is already trying to connect, we just
		 * sit on the bench with them..
		 */
		break;

	case HCI_LINK_OPEN:
		/*
		 * If already open, halt any expiry timeouts. We don't need
		 * to worry about a timeout that is already invoking, since
		 * refcnt > 0 will keep the link alive.
		 */
		callout_stop(&link->hl_expire);
		break;

	default:
		UNKNOWN(link->hl_state);
		return NULL;
	}

	/* open */
	link->hl_refcnt++;

	return link;
}

/*
 * Close ACL connection. When there are no more references to this link,
 * we can either close it down or schedule a delayed closedown.
 */
void
hci_acl_close(struct hci_link *link, int err)
{

	KASSERT(link != NULL);

	if (--link->hl_refcnt == 0) {
		if (link->hl_state == HCI_LINK_CLOSED)
			hci_link_free(link, err);
		else if (hci_acl_expiry > 0)
			callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
	}
}

/*
 * Incoming ACL connection.
 *
 * Check the L2CAP listeners list and only accept when there is a
 * potential listener available.
 *
 * There should not already be a link to the same bdaddr; we check
 * anyway, though that case is left unhandled for now.
 */
struct hci_link *
hci_acl_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct hci_link *link;
	struct l2cap_channel *chan;

	LIST_FOREACH(chan, &l2cap_listen_list, lc_ncid) {
		if (bdaddr_same(&unit->hci_bdaddr, &chan->lc_laddr.bt_bdaddr)
		    || bdaddr_any(&chan->lc_laddr.bt_bdaddr))
			break;
	}

	if (chan == NULL) {
		DPRINTF("%s: rejecting connection (no listeners)\n",
		    device_xname(unit->hci_dev));

		return NULL;
	}

	link = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (link != NULL) {
		DPRINTF("%s: rejecting connection (link exists)\n",
		    device_xname(unit->hci_dev));

		return NULL;
	}

	link = hci_link_alloc(unit, bdaddr, HCI_LINK_ACL);
	if (link != NULL) {
		link->hl_state = HCI_LINK_WAIT_CONNECT;

		if (hci_acl_expiry > 0)
			callout_schedule(&link->hl_expire, hci_acl_expiry * hz);
	}

	return link;
}

void
hci_acl_timeout(void *arg)
{
	struct hci_link *link = arg;
	hci_discon_cp cp;
	int err;

	mutex_enter(bt_lock);
	callout_ack(&link->hl_expire);

	if (link->hl_refcnt > 0)
		goto out;

	DPRINTF("link #%d expired\n", link->hl_handle);

	switch (link->hl_state) {
	case HCI_LINK_CLOSED:
	case HCI_LINK_WAIT_CONNECT:
		hci_link_free(link, ECONNRESET);
		break;

	case HCI_LINK_WAIT_AUTH:
	case HCI_LINK_WAIT_ENCRYPT:
	case HCI_LINK_WAIT_SECURE:
	case HCI_LINK_OPEN:
		cp.con_handle = htole16(link->hl_handle);
		cp.reason = 0x13;	/* "Remote User Terminated Connection" */

		err = hci_send_cmd(link->hl_unit, HCI_CMD_DISCONNECT,
		    &cp, sizeof(cp));

		if (err) {
			DPRINTF("error %d sending HCI_CMD_DISCONNECT\n",
			    err);
		}

		break;

	default:
		UNKNOWN(link->hl_state);
		break;
	}

out:
	mutex_exit(bt_lock);
}

/*
 * Initiate any Link Mode change requests.
 */
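/*
 * Note: requests are issued one at a time, in the order authentication,
 * encryption, then link key change (secure).  Each request takes the link
 * out of HCI_LINK_OPEN and returns EINPROGRESS; when the mode change has
 * completed, the event handlers call hci_acl_linkmode() below.
 */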
int
hci_acl_setmode(struct hci_link *link)
{
	int err;

	KASSERT(link != NULL);
	KASSERT(link->hl_unit != NULL);

	if (link->hl_state != HCI_LINK_OPEN)
		return EINPROGRESS;

	if ((link->hl_flags & HCI_LINK_AUTH_REQ)
	    && !(link->hl_flags & HCI_LINK_AUTH)) {
		hci_auth_req_cp cp;

		DPRINTF("requesting auth for handle #%d\n",
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_AUTH;
		cp.con_handle = htole16(link->hl_handle);
		err = hci_send_cmd(link->hl_unit, HCI_CMD_AUTH_REQ,
		    &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	if ((link->hl_flags & HCI_LINK_ENCRYPT_REQ)
	    && !(link->hl_flags & HCI_LINK_ENCRYPT)) {
		hci_set_con_encryption_cp cp;

		/* XXX we should check features for encryption capability */

		DPRINTF("requesting encryption for handle #%d\n",
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_ENCRYPT;
		cp.con_handle = htole16(link->hl_handle);
		cp.encryption_enable = 0x01;

		err = hci_send_cmd(link->hl_unit, HCI_CMD_SET_CON_ENCRYPTION,
		    &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	if ((link->hl_flags & HCI_LINK_SECURE_REQ)) {
		hci_change_con_link_key_cp cp;

		/* always change link key for SECURE requests */
		link->hl_flags &= ~HCI_LINK_SECURE;

		DPRINTF("changing link key for handle #%d\n",
		    link->hl_handle);

		link->hl_state = HCI_LINK_WAIT_SECURE;
		cp.con_handle = htole16(link->hl_handle);

		err = hci_send_cmd(link->hl_unit, HCI_CMD_CHANGE_CON_LINK_KEY,
		    &cp, sizeof(cp));

		return (err == 0 ? EINPROGRESS : err);
	}

	return 0;
}

/*
 * Link Mode changed.
 *
 * This is called from event handlers when the mode change
 * is complete. We notify upstream and restart the link.
 */
void
hci_acl_linkmode(struct hci_link *link)
{
	struct l2cap_channel *chan, *next;
	int err, mode = 0;

	DPRINTF("handle #%d, auth %s, encrypt %s, secure %s\n",
	    link->hl_handle,
	    (link->hl_flags & HCI_LINK_AUTH ? "on" : "off"),
	    (link->hl_flags & HCI_LINK_ENCRYPT ? "on" : "off"),
	    (link->hl_flags & HCI_LINK_SECURE ? "on" : "off"));

	if (link->hl_flags & HCI_LINK_AUTH)
		mode |= L2CAP_LM_AUTH;

	if (link->hl_flags & HCI_LINK_ENCRYPT)
		mode |= L2CAP_LM_ENCRYPT;

	if (link->hl_flags & HCI_LINK_SECURE)
		mode |= L2CAP_LM_SECURE;

	/*
	 * The link state will only be OPEN here if the mode change
	 * was successful. So, we can proceed with L2CAP connections,
	 * or notify already established channels, to allow any that
	 * are dissatisfied to disconnect before we restart.
	 */
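	/*
	 * A channel is dissatisfied when a mode bit it requires (lc_mode)
	 * is missing from the mode mask we actually achieved.
	 */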
	next = LIST_FIRST(&l2cap_active_list);
	while ((chan = next) != NULL) {
		next = LIST_NEXT(chan, lc_ncid);

		if (chan->lc_link != link)
			continue;

		switch (chan->lc_state) {
		case L2CAP_WAIT_SEND_CONNECT_REQ:	/* we are connecting */
			if ((mode & chan->lc_mode) != chan->lc_mode) {
				l2cap_close(chan, ECONNABORTED);
				break;
			}

			chan->lc_state = L2CAP_WAIT_RECV_CONNECT_RSP;
			err = l2cap_send_connect_req(chan);
			if (err) {
				l2cap_close(chan, err);
				break;
			}
			break;

		case L2CAP_WAIT_SEND_CONNECT_RSP:	/* they are connecting */
			if ((mode & chan->lc_mode) != chan->lc_mode) {
				l2cap_send_connect_rsp(link, chan->lc_ident,
				    0, chan->lc_rcid,
				    L2CAP_SECURITY_BLOCK);

				l2cap_close(chan, ECONNABORTED);
				break;
			}

			l2cap_send_connect_rsp(link, chan->lc_ident,
			    chan->lc_lcid, chan->lc_rcid,
			    L2CAP_SUCCESS);

			chan->lc_state = L2CAP_WAIT_CONFIG;
			chan->lc_flags |= (L2CAP_WAIT_CONFIG_RSP | L2CAP_WAIT_CONFIG_REQ);
			err = l2cap_send_config_req(chan);
			if (err) {
				l2cap_close(chan, err);
				break;
			}
			break;

		case L2CAP_WAIT_RECV_CONNECT_RSP:
		case L2CAP_WAIT_CONFIG:
		case L2CAP_OPEN:	/* already established */
			(*chan->lc_proto->linkmode)(chan->lc_upper, mode);
			break;

		default:
			break;
		}
	}

	link->hl_state = HCI_LINK_OPEN;
	hci_acl_start(link);
}

/*
 * Receive ACL Data
 *
 * we accumulate packet fragments on the hci_link structure
 * until a full L2CAP frame is ready, then send it on.
 */
void
hci_acl_recv(struct mbuf *m, struct hci_unit *unit)
{
	struct hci_link *link;
	hci_acldata_hdr_t hdr;
	uint16_t handle, want;
	int pb, got;

	KASSERT(m != NULL);
	KASSERT(unit != NULL);

	KASSERT(m->m_pkthdr.len >= sizeof(hdr));
	m_copydata(m, 0, sizeof(hdr), &hdr);
	m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
	if (hdr.type != HCI_ACL_DATA_PKT) {
		aprint_error_dev(unit->hci_dev, "bad ACL packet type\n");
		goto bad;
	}

	if (m->m_pkthdr.len != le16toh(hdr.length)) {
		aprint_error_dev(unit->hci_dev,
		    "bad ACL packet length (%d != %d)\n",
		    m->m_pkthdr.len, le16toh(hdr.length));
		goto bad;
	}
#endif

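	/*
	 * The connection handle field packs the 12-bit handle together
	 * with the packet boundary (PB) and broadcast flags, so extract
	 * the pieces we need.
	 */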
	hdr.length = le16toh(hdr.length);
	hdr.con_handle = le16toh(hdr.con_handle);
	handle = HCI_CON_HANDLE(hdr.con_handle);
	pb = HCI_PB_FLAG(hdr.con_handle);

	link = hci_link_lookup_handle(unit, handle);
	if (link == NULL) {
		hci_discon_cp cp;

		DPRINTF("%s: dumping packet for unknown handle #%d\n",
		    device_xname(unit->hci_dev), handle);

		/*
		 * There is no way to find out what this connection handle is
		 * for, just get rid of it. This may happen if a USB dongle
		 * is plugged into a self powered hub and does not reset when
		 * the system is shut down.
		 */
		cp.con_handle = htole16(handle);
		cp.reason = 0x13;	/* "Remote User Terminated Connection" */
		hci_send_cmd(unit, HCI_CMD_DISCONNECT, &cp, sizeof(cp));
		goto bad;
	}

	switch (pb) {
	case HCI_PACKET_START:
		if (link->hl_rxp != NULL)
			aprint_error_dev(unit->hci_dev,
			    "dropped incomplete ACL packet\n");

		if (m->m_pkthdr.len < sizeof(l2cap_hdr_t)) {
			aprint_error_dev(unit->hci_dev, "short ACL packet\n");
			goto bad;
		}

		link->hl_rxp = m;
		got = m->m_pkthdr.len;
		break;

	case HCI_PACKET_FRAGMENT:
		if (link->hl_rxp == NULL) {
			aprint_error_dev(unit->hci_dev,
			    "unexpected packet fragment\n");

			goto bad;
		}

		got = m->m_pkthdr.len + link->hl_rxp->m_pkthdr.len;
		m_cat(link->hl_rxp, m);
		m = link->hl_rxp;
		m->m_pkthdr.len = got;
		break;

	default:
		aprint_error_dev(unit->hci_dev, "unknown packet type\n");
		goto bad;
	}

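	/*
	 * The first two bytes of the L2CAP basic header give the payload
	 * length, so a complete frame holds that many bytes plus the
	 * header itself; 'want' is how much is still missing.
	 */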
	m_copydata(m, 0, sizeof(want), &want);
	want = le16toh(want) + sizeof(l2cap_hdr_t) - got;

	if (want > 0)
		return;

	link->hl_rxp = NULL;

	if (want == 0) {
		l2cap_recv_frame(m, link);
		return;
	}

bad:
	m_freem(m);
}

/*
 * Send ACL data on link
 *
 * We must fragment packets into chunks no larger than unit->hci_max_acl_size
 * and prepend a relevant ACL header to each fragment. We keep a PDU structure
 * attached to the link, so that completed fragments can be marked off and
 * more data requested from above once the PDU is sent.
 */
int
hci_acl_send(struct mbuf *m, struct hci_link *link,
    struct l2cap_channel *chan)
{
	struct l2cap_pdu *pdu;
	struct mbuf *n = NULL;
	int plen, mlen, num = 0;

	KASSERT(link != NULL);
	KASSERT(m != NULL);
	KASSERT(m->m_flags & M_PKTHDR);
	KASSERT(m->m_pkthdr.len > 0);

	if (link->hl_state == HCI_LINK_CLOSED) {
		m_freem(m);
		return ENETDOWN;
	}

	pdu = pool_get(&l2cap_pdu_pool, PR_NOWAIT);
	if (pdu == NULL)
		goto nomem;

	pdu->lp_chan = chan;
	pdu->lp_pending = 0;
	MBUFQ_INIT(&pdu->lp_data);

	plen = m->m_pkthdr.len;
	mlen = link->hl_unit->hci_max_acl_size;

	DPRINTFN(5, "%s: handle #%d, plen = %d, max = %d\n",
	    device_xname(link->hl_unit->hci_dev), link->hl_handle, plen, mlen);

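	/*
	 * For illustration: with plen = 700 and hci_max_acl_size = 384
	 * (figures chosen arbitrarily), the loop below queues fragments
	 * of 384 and 316 bytes, and only the first is tagged M_PROTO1.
	 */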
	while (plen > 0) {
		if (plen > mlen) {
			n = m_split(m, mlen, M_DONTWAIT);
			if (n == NULL)
				goto nomem;
		} else {
			mlen = plen;
		}

		if (num++ == 0)
			m->m_flags |= M_PROTO1;	/* tag first fragment */

		DPRINTFN(10, "chunk of %d (plen = %d) bytes\n", mlen, plen);
		MBUFQ_ENQUEUE(&pdu->lp_data, m);
		m = n;
		plen -= mlen;
	}

	TAILQ_INSERT_TAIL(&link->hl_txq, pdu, lp_next);
	link->hl_txqlen += num;

	hci_acl_start(link);

	return 0;

nomem:
	if (m != NULL)
		m_freem(m);

	if (pdu != NULL) {
		MBUFQ_DRAIN(&pdu->lp_data);
		pool_put(&l2cap_pdu_pool, pdu);
	}

	return ENOMEM;
}

/*
 * Start sending ACL data on link.
 *
 * This is called when the queue may need restarting: as new data
 * is queued, after link mode changes have completed, or when device
 * buffers have cleared.
 *
 * We may use all the available packet slots. The reason that we add
 * the ACL encapsulation here rather than in hci_acl_send() is that L2CAP
 * signal packets may be queued before the handle is given to us..
 */
void
hci_acl_start(struct hci_link *link)
{
	struct hci_unit *unit;
	hci_acldata_hdr_t *hdr;
	struct l2cap_pdu *pdu;
	struct mbuf *m;
	uint16_t handle;

	KASSERT(link != NULL);

	unit = link->hl_unit;
	KASSERT(unit != NULL);

	/* this is mainly to block ourselves (below) */
	if (link->hl_state != HCI_LINK_OPEN)
		return;

	if (link->hl_txqlen == 0 || unit->hci_num_acl_pkts == 0)
		return;

	/* find first PDU with data to send */
	pdu = TAILQ_FIRST(&link->hl_txq);
	for (;;) {
		if (pdu == NULL)
			return;

		if (MBUFQ_FIRST(&pdu->lp_data) != NULL)
			break;

		pdu = TAILQ_NEXT(pdu, lp_next);
	}

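	/*
	 * Send fragments while the controller still advertises free ACL
	 * buffer slots; each fragment gets an ACL header whose PB flag
	 * marks it as a start or continuation fragment, with the
	 * broadcast flag clear (point-to-point).
	 */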
	while (unit->hci_num_acl_pkts > 0) {
		MBUFQ_DEQUEUE(&pdu->lp_data, m);
		KASSERT(m != NULL);

		if (m->m_flags & M_PROTO1)
			handle = HCI_MK_CON_HANDLE(link->hl_handle,
			    HCI_PACKET_START, 0);
		else
			handle = HCI_MK_CON_HANDLE(link->hl_handle,
			    HCI_PACKET_FRAGMENT, 0);

		M_PREPEND(m, sizeof(*hdr), M_DONTWAIT);
		if (m == NULL)
			break;

		hdr = mtod(m, hci_acldata_hdr_t *);
		hdr->type = HCI_ACL_DATA_PKT;
		hdr->con_handle = htole16(handle);
		hdr->length = htole16(m->m_pkthdr.len - sizeof(*hdr));

		link->hl_txqlen--;
		pdu->lp_pending++;

		hci_output_acl(unit, m);

		if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
			if (pdu->lp_chan) {
				/*
				 * This should enable streaming of PDUs - when
				 * we have placed all the fragments on the acl
				 * output queue, we trigger the L2CAP layer to
				 * send us down one more. Use a false state so
				 * we don't run into ourselves coming back from
				 * the future..
				 */
				link->hl_state = HCI_LINK_BLOCK;
				l2cap_start(pdu->lp_chan);
				link->hl_state = HCI_LINK_OPEN;
			}

			pdu = TAILQ_NEXT(pdu, lp_next);
			if (pdu == NULL)
				break;
		}
	}

	/*
	 * We have had our turn now, move to the back of the queue to let
	 * other links have a go at the output buffers..
	 */
	if (TAILQ_NEXT(link, hl_next)) {
		TAILQ_REMOVE(&unit->hci_links, link, hl_next);
		TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
	}
}

/*
 * Confirm ACL packets cleared from Controller buffers. We scan our PDU
 * list to clear pending fragments and signal upstream for more data
 * when a PDU is complete.
 */
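/*
 * 'num' is the per-handle count the controller reports, typically via
 * the Number Of Completed Packets event.
 */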
void
hci_acl_complete(struct hci_link *link, int num)
{
	struct l2cap_pdu *pdu;
	struct l2cap_channel *chan;

	DPRINTFN(5, "handle #%d (%d)\n", link->hl_handle, num);

	while (num > 0) {
		pdu = TAILQ_FIRST(&link->hl_txq);
		if (pdu == NULL) {
			aprint_error_dev(link->hl_unit->hci_dev,
			    "%d packets completed on handle #%x but none pending!\n",
			    num, link->hl_handle);

			return;
		}

		if (num >= pdu->lp_pending) {
			num -= pdu->lp_pending;
			pdu->lp_pending = 0;

			if (MBUFQ_FIRST(&pdu->lp_data) == NULL) {
				TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
				chan = pdu->lp_chan;
				if (chan != NULL) {
					chan->lc_pending--;
					(*chan->lc_proto->complete)
					    (chan->lc_upper, 1);

					if (chan->lc_pending == 0)
						l2cap_start(chan);
				}

				pool_put(&l2cap_pdu_pool, pdu);
			}
		} else {
			pdu->lp_pending -= num;
			num = 0;
		}
	}
}

/*******************************************************************************
 *
 *	HCI SCO Connections
 */

/*
 * Incoming SCO Connection. We check the list for anybody willing
 * to take it.
 */
struct hci_link *
hci_sco_newconn(struct hci_unit *unit, bdaddr_t *bdaddr)
{
	struct sockaddr_bt laddr, raddr;
	struct sco_pcb *pcb, *new;
	struct hci_link *sco, *acl;

	memset(&laddr, 0, sizeof(laddr));
	laddr.bt_len = sizeof(laddr);
	laddr.bt_family = AF_BLUETOOTH;
	bdaddr_copy(&laddr.bt_bdaddr, &unit->hci_bdaddr);

	memset(&raddr, 0, sizeof(raddr));
	raddr.bt_len = sizeof(raddr);
	raddr.bt_family = AF_BLUETOOTH;
	bdaddr_copy(&raddr.bt_bdaddr, bdaddr);

	/*
	 * There should already be an ACL link up and running before
	 * the controller sends us SCO connection requests, but you
	 * never know..
	 */
	acl = hci_link_lookup_bdaddr(unit, bdaddr, HCI_LINK_ACL);
	if (acl == NULL || acl->hl_state != HCI_LINK_OPEN)
		return NULL;

	LIST_FOREACH(pcb, &sco_pcb, sp_next) {
		if ((pcb->sp_flags & SP_LISTENING) == 0)
			continue;

		new = (*pcb->sp_proto->newconn)(pcb->sp_upper, &laddr, &raddr);
		if (new == NULL)
			continue;

		/*
		 * Ok, got new pcb so we can start a new link and fill
		 * in all the details.
		 */
		bdaddr_copy(&new->sp_laddr, &unit->hci_bdaddr);
		bdaddr_copy(&new->sp_raddr, bdaddr);

		sco = hci_link_alloc(unit, bdaddr, HCI_LINK_SCO);
		if (sco == NULL) {
			sco_detach(&new);
			return NULL;
		}

		sco->hl_link = hci_acl_open(unit, bdaddr);
		KASSERT(sco->hl_link == acl);

		sco->hl_sco = new;
		new->sp_link = sco;

		new->sp_mtu = unit->hci_max_sco_size;
		return sco;
	}

	return NULL;
}

/*
 * Receive SCO packet; we only need to strip the header and send
 * it to the right handler.
 */
void
hci_sco_recv(struct mbuf *m, struct hci_unit *unit)
{
	struct hci_link *link;
	hci_scodata_hdr_t hdr;
	uint16_t handle;

	KASSERT(m != NULL);
	KASSERT(unit != NULL);

	KASSERT(m->m_pkthdr.len >= sizeof(hdr));
	m_copydata(m, 0, sizeof(hdr), &hdr);
	m_adj(m, sizeof(hdr));

#ifdef DIAGNOSTIC
	if (hdr.type != HCI_SCO_DATA_PKT) {
		aprint_error_dev(unit->hci_dev, "bad SCO packet type\n");
		goto bad;
	}

	if (m->m_pkthdr.len != hdr.length) {
		aprint_error_dev(unit->hci_dev,
		    "bad SCO packet length (%d != %d)\n",
		    m->m_pkthdr.len, hdr.length);

		goto bad;
	}
#endif

	hdr.con_handle = le16toh(hdr.con_handle);
	handle = HCI_CON_HANDLE(hdr.con_handle);

	link = hci_link_lookup_handle(unit, handle);
	if (link == NULL || link->hl_type == HCI_LINK_ACL) {
		DPRINTF("%s: dumping packet for unknown handle #%d\n",
		    device_xname(unit->hci_dev), handle);

		goto bad;
	}

	(*link->hl_sco->sp_proto->input)(link->hl_sco->sp_upper, m);
	return;

bad:
	m_freem(m);
}

void
hci_sco_start(struct hci_link *link)
{
}

/*
 * SCO packets have completed at the controller, so we can
 * signal up to free the buffer space.
 */
void
hci_sco_complete(struct hci_link *link, int num)
{

	DPRINTFN(5, "handle #%d (num=%d)\n", link->hl_handle, num);
	link->hl_sco->sp_pending--;
	(*link->hl_sco->sp_proto->complete)(link->hl_sco->sp_upper, num);
}

/*******************************************************************************
 *
 *	Generic HCI Connection alloc/free/lookup etc
 */

struct hci_link *
hci_link_alloc(struct hci_unit *unit, bdaddr_t *bdaddr, uint8_t type)
{
	struct hci_link *link;

	KASSERT(unit != NULL);

	link = malloc(sizeof(struct hci_link), M_BLUETOOTH, M_NOWAIT | M_ZERO);
	if (link == NULL)
		return NULL;

	link->hl_unit = unit;
	link->hl_type = type;
	link->hl_state = HCI_LINK_CLOSED;
	bdaddr_copy(&link->hl_bdaddr, bdaddr);

	/* init ACL portion */
	callout_init(&link->hl_expire, 0);
	callout_setfunc(&link->hl_expire, hci_acl_timeout, link);

	TAILQ_INIT(&link->hl_txq);	/* outgoing packets */
	TAILQ_INIT(&link->hl_reqs);	/* request queue */

	link->hl_mtu = L2CAP_MTU_DEFAULT;		/* L2CAP signal mtu */
	link->hl_flush = L2CAP_FLUSH_TIMO_DEFAULT;	/* flush timeout */

	/* init SCO portion */
	MBUFQ_INIT(&link->hl_data);

	/* attach to unit */
	TAILQ_INSERT_TAIL(&unit->hci_links, link, hl_next);
	return link;
}

void
hci_link_free(struct hci_link *link, int err)
{
	struct l2cap_req *req;
	struct l2cap_pdu *pdu;
	struct l2cap_channel *chan, *next;

	KASSERT(link != NULL);

	DPRINTF("#%d, type = %d, state = %d, refcnt = %d\n",
	    link->hl_handle, link->hl_type,
	    link->hl_state, link->hl_refcnt);

	/* ACL reference count */
	if (link->hl_refcnt > 0) {
		next = LIST_FIRST(&l2cap_active_list);
		while ((chan = next) != NULL) {
			next = LIST_NEXT(chan, lc_ncid);
			if (chan->lc_link == link)
				l2cap_close(chan, err);
		}
	}
	KASSERT(link->hl_refcnt == 0);

	/* ACL L2CAP requests.. */
	while ((req = TAILQ_FIRST(&link->hl_reqs)) != NULL)
		l2cap_request_free(req);

	KASSERT(TAILQ_EMPTY(&link->hl_reqs));

	/* ACL outgoing data queue */
	while ((pdu = TAILQ_FIRST(&link->hl_txq)) != NULL) {
		TAILQ_REMOVE(&link->hl_txq, pdu, lp_next);
		MBUFQ_DRAIN(&pdu->lp_data);
		if (pdu->lp_pending)
			link->hl_unit->hci_num_acl_pkts += pdu->lp_pending;

		pool_put(&l2cap_pdu_pool, pdu);
	}

	KASSERT(TAILQ_EMPTY(&link->hl_txq));

	/* ACL incoming data packet */
	if (link->hl_rxp != NULL) {
		m_freem(link->hl_rxp);
		link->hl_rxp = NULL;
	}

	/* SCO master ACL link */
	if (link->hl_link != NULL) {
		hci_acl_close(link->hl_link, err);
		link->hl_link = NULL;
	}

	/* SCO pcb */
	if (link->hl_sco != NULL) {
		struct sco_pcb *pcb;

		pcb = link->hl_sco;
		pcb->sp_link = NULL;
		link->hl_sco = NULL;
		(*pcb->sp_proto->disconnected)(pcb->sp_upper, err);
	}

	/* flush any SCO data */
	MBUFQ_DRAIN(&link->hl_data);

	/*
	 * Halt the callout - if it's already running we cannot free the
	 * link structure, but the timeout function will call us back in
	 * any case.
	 */
	link->hl_state = HCI_LINK_CLOSED;
	callout_stop(&link->hl_expire);
	if (callout_invoking(&link->hl_expire))
		return;

	callout_destroy(&link->hl_expire);

	/*
	 * If we made a note of clock offset, keep it in a memo
	 * to facilitate reconnections to this device.
	 */
	if (link->hl_clock != 0) {
		struct hci_memo *memo;

		memo = hci_memo_new(link->hl_unit, &link->hl_bdaddr);
		if (memo != NULL)
			memo->clock_offset = link->hl_clock;
	}

	TAILQ_REMOVE(&link->hl_unit->hci_links, link, hl_next);
	free(link, M_BLUETOOTH);
}

/*
 * Lookup HCI link by address and type. Note that for SCO links there may
 * be more than one link per address, so we only return links with no
 * handle (i.e. new links).
 */
struct hci_link *
hci_link_lookup_bdaddr(struct hci_unit *unit, bdaddr_t *bdaddr, uint8_t type)
{
	struct hci_link *link;

	KASSERT(unit != NULL);
	KASSERT(bdaddr != NULL);

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (link->hl_type != type)
			continue;

		if (type == HCI_LINK_SCO && link->hl_handle != 0)
			continue;

		if (bdaddr_same(&link->hl_bdaddr, bdaddr))
			break;
	}

	return link;
}

struct hci_link *
hci_link_lookup_handle(struct hci_unit *unit, uint16_t handle)
{
	struct hci_link *link;

	KASSERT(unit != NULL);

	TAILQ_FOREACH(link, &unit->hci_links, hl_next) {
		if (handle == link->hl_handle)
			break;
	}

	return link;
}