/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
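/* Example of the bit math above: for nr = HCI_EV_CMD_COMPLETE (0x0e),
 * nr >> 5 selects 32-bit word 0 and (nr & 31) is bit 14, so the lookup
 * tests bit 14 of event_mask[0].
 */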
/* Security filter */
#define HCI_SFLT_MAX_OGF	5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
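/* Reading the table above: each ocf_mask row is a 128-bit bitmap indexed
 * by OCF. For example, the OGF_INFO_PARAM row 0x000002be has bits 1-5, 7
 * and 9 set, i.e. the read-only informational commands such as Read Local
 * Version Information (OCF 0x0001) and Read BD_ADDR (OCF 0x0009) that
 * sockets without CAP_NET_RAW are allowed to send.
 */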
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
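/* Every frame queued to the monitor channel is thus prefixed with the
 * 6-byte struct hci_mon_hdr from <net/bluetooth/hci_mon.h> (little-endian
 * opcode, controller index and payload length), which is the header that
 * userspace tracers such as btmon use to demultiplex the stream.
 */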
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
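/* The open-notification payload built above is laid out as
 *
 *	cookie (4) | format (2) | version (3) | flags (4) |
 *	comm length (1) | comm (TASK_COMM_LEN)
 *
 * which is where the 14 + TASK_COMM_LEN allocation size comes from.
 */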
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);
	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);
	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
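/* A minimal userspace sketch of driving one of these ioctls; the struct
 * layout follows hci_dev_list_req from BlueZ's <bluetooth/hci.h>, and the
 * fixed count of 16 is just an assumption for the example:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct {
 *		uint16_t dev_num;
 *		struct hci_dev_req dev_req[16];
 *	} dl = { .dev_num = 16 };
 *
 *	if (ioctl(fd, HCIGETDEVLIST, &dl) == 0) {
 *		// dl.dev_num now holds the number of filled entries,
 *		// each with a dev_id usable for HCIGETDEVINFO.
 *	}
 */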
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}
	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;
	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;
	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;
	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;
	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
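/* A minimal userspace sketch of binding to one of the channels handled
 * above, assuming the sockaddr_hci layout from BlueZ's <bluetooth/hci.h>:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,                  // hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * Binding to HCI_CHANNEL_MONITOR instead requires CAP_NET_RAW and
 * hci_dev set to HCI_DEV_NONE, per the checks above.
 */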
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);

#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
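/* Putting the checks above together, a valid logging frame looks like
 * this (multi-byte fields little endian):
 *
 *	struct hci_mon_hdr { opcode = 0x0000, index, len }
 *	priority  (1 byte, 0-7)
 *	ident_len (1 byte, length of ident including its NUL, or 0)
 *	ident     (ident_len bytes, NUL terminated)
 *	message   (NUL terminated)
 *
 * The kernel only rewrites hdr->opcode to HCI_MON_USER_LOGGING before
 * forwarding the frame to the monitor channel.
 */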
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}
	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;
	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
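/* A minimal userspace sketch of installing a filter on a raw channel
 * socket, assuming the hci_filter definition from BlueZ's
 * <bluetooth/hci.h>; without CAP_NET_RAW the kernel additionally masks
 * it with hci_sec_filter as shown above:
 *
 *	struct hci_filter flt = {
 *		.type_mask  = 1 << HCI_EVENT_PKT,
 *		.event_mask = { (1 << HCI_EV_CMD_COMPLETE) |
 *				(1 << HCI_EV_CMD_STATUS), 0 },
 *	};
 *
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */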
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static void hci_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}