GNU Linux-libre 4.9.337-gnu1
[releases.git] / net / bluetooth / hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36
37 #include "mgmt_util.h"
38
/* Registered HCI management channels (e.g. the control channel) and
 * the mutex serializing registry access.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Allocator for the per-socket cookies reported to monitor sockets */
static DEFINE_IDA(sock_cookie_ida);

/* Number of attached monitor sockets; traffic is mirrored only when
 * this is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
45
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* bound controller, NULL if unbound */
	struct hci_filter filter;	/* RAW channel packet/event filter */
	__u32             cmsg_mask;	/* requested ancillary data items */
	unsigned short    channel;	/* HCI_CHANNEL_* the socket uses */
	unsigned long     flags;	/* HCI_SOCK_* flag bits */
	__u32             cookie;	/* identifier for monitor tracing */
	char              comm[TASK_COMM_LEN];	/* opener's task name */
};
61
/* Return the controller bound to @sk, or an ERR_PTR when the socket
 * is not bound (-EBADFD) or the controller is mid-unregistration
 * (-EPIPE). Callers must not touch hdev state on the error paths.
 */
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
72
73 void hci_sock_set_flag(struct sock *sk, int nr)
74 {
75         set_bit(nr, &hci_pi(sk)->flags);
76 }
77
78 void hci_sock_clear_flag(struct sock *sk, int nr)
79 {
80         clear_bit(nr, &hci_pi(sk)->flags);
81 }
82
83 int hci_sock_test_flag(struct sock *sk, int nr)
84 {
85         return test_bit(nr, &hci_pi(sk)->flags);
86 }
87
88 unsigned short hci_sock_get_channel(struct sock *sk)
89 {
90         return hci_pi(sk)->channel;
91 }
92
93 u32 hci_sock_get_cookie(struct sock *sk)
94 {
95         return hci_pi(sk)->cookie;
96 }
97
/* Generate the monitor cookie for @sk on first use and record the
 * opener's task name.
 *
 * Returns true when a new cookie was generated, false when the socket
 * already had one. On IDA allocation failure the cookie is set to the
 * 0xffffffff marker so the socket still has a non-zero identifier.
 */
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
114
/* Release @sk's monitor cookie back to the IDA. The cookie field is
 * left at the 0xffffffff marker (not 0) so hci_sock_gen_cookie()
 * never hands out a fresh cookie for this socket again.
 */
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
124
125 static inline int hci_test_bit(int nr, const void *addr)
126 {
127         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
128 }
129
/* Security filter applied to RAW sockets without CAP_NET_RAW */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;	/* allowed packet types (bitmap) */
	__u32 event_mask[2];	/* allowed HCI events (bitmap) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs per OGF */
};
138
/* Allow list for unprivileged RAW sockets: only these packet types,
 * events and commands pass through.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
159
/* Global list of all HCI sockets, protected by its embedded rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
163
/* Decide whether @skb must be withheld from RAW socket @sk based on
 * the socket's packet type, event and opcode filters.
 *
 * Returns true when the packet is filtered out (must not be queued).
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Opcode offsets in the event payload: 3 for Command Complete,
	 * 4 for Command Status.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
200
/* Send frame to RAW socket.
 *
 * Delivers @skb to every bound RAW/USER channel socket attached to
 * @hdev, except the socket the frame originated from. A single
 * private copy (with the packet type byte pushed in front) is created
 * lazily on first delivery and then cloned per receiver.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel sockets only see incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Per-socket queue overflow only loses this clone */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
263
264 /* Send frame to sockets with specific channel */
265 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
266                          int flag, struct sock *skip_sk)
267 {
268         struct sock *sk;
269
270         BT_DBG("channel %u len %d", channel, skb->len);
271
272         read_lock(&hci_sk_list.lock);
273
274         sk_for_each(sk, &hci_sk_list.head) {
275                 struct sk_buff *nskb;
276
277                 /* Ignore socket without the flag set */
278                 if (!hci_sock_test_flag(sk, flag))
279                         continue;
280
281                 /* Skip the original socket */
282                 if (sk == skip_sk)
283                         continue;
284
285                 if (sk->sk_state != BT_BOUND)
286                         continue;
287
288                 if (hci_pi(sk)->channel != channel)
289                         continue;
290
291                 nskb = skb_clone(skb, GFP_ATOMIC);
292                 if (!nskb)
293                         continue;
294
295                 if (sock_queue_rcv_skb(sk, nskb))
296                         kfree_skb(nskb);
297         }
298
299         read_unlock(&hci_sk_list.lock);
300 }
301
/* Send frame to monitor socket.
 *
 * Mirrors an HCI packet to the monitor channel, prefixed with a
 * hci_mon_hdr carrying the opcode, controller index and length.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Cheap exit while no monitor socket is attached */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Translate packet type (and direction for data packets) into
	 * the monitor opcode.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		/* Unknown packet types are not mirrored */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
355
/* Mirror a control channel event to the monitor.
 *
 * For every control socket carrying @flag (excluding @skip_sk) a
 * monitor message is built containing that socket's cookie, the event
 * code and @data, then broadcast on the monitor channel with the
 * caller-provided timestamp.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* Payload: cookie (4), event code (2), event data */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			memcpy(skb_put(skb, data_len), data, data_len);

		skb->tstamp = tstamp;

		hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
409
410 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
411 {
412         struct hci_mon_hdr *hdr;
413         struct hci_mon_new_index *ni;
414         struct hci_mon_index_info *ii;
415         struct sk_buff *skb;
416         __le16 opcode;
417
418         switch (event) {
419         case HCI_DEV_REG:
420                 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
421                 if (!skb)
422                         return NULL;
423
424                 ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
425                 ni->type = hdev->dev_type;
426                 ni->bus = hdev->bus;
427                 bacpy(&ni->bdaddr, &hdev->bdaddr);
428                 memcpy(ni->name, hdev->name, 8);
429
430                 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
431                 break;
432
433         case HCI_DEV_UNREG:
434                 skb = bt_skb_alloc(0, GFP_ATOMIC);
435                 if (!skb)
436                         return NULL;
437
438                 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
439                 break;
440
441         case HCI_DEV_SETUP:
442                 if (hdev->manufacturer == 0xffff)
443                         return NULL;
444
445                 /* fall through */
446
447         case HCI_DEV_UP:
448                 skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
449                 if (!skb)
450                         return NULL;
451
452                 ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
453                 bacpy(&ii->bdaddr, &hdev->bdaddr);
454                 ii->manufacturer = cpu_to_le16(hdev->manufacturer);
455
456                 opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
457                 break;
458
459         case HCI_DEV_OPEN:
460                 skb = bt_skb_alloc(0, GFP_ATOMIC);
461                 if (!skb)
462                         return NULL;
463
464                 opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
465                 break;
466
467         case HCI_DEV_CLOSE:
468                 skb = bt_skb_alloc(0, GFP_ATOMIC);
469                 if (!skb)
470                         return NULL;
471
472                 opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
473                 break;
474
475         default:
476                 return NULL;
477         }
478
479         __net_timestamp(skb);
480
481         hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
482         hdr->opcode = opcode;
483         hdr->index = cpu_to_le16(hdev->id);
484         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
485
486         return skb;
487 }
488
/* Build the monitor "control open" message describing socket @sk:
 * cookie, channel format, version info, trust flag and the opener's
 * task name. Returns NULL when the socket has no cookie yet or uses
 * a channel the monitor does not track, and on allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	/* Payload: cookie (4), format (2), version (3), flags (4),
	 * name length (1), name (TASK_COMM_LEN)
	 */
	skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	*skb_put(skb, 1) = TASK_COMM_LEN;
	memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
546
547 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
548 {
549         struct hci_mon_hdr *hdr;
550         struct sk_buff *skb;
551
552         /* No message needed when cookie is not present */
553         if (!hci_pi(sk)->cookie)
554                 return NULL;
555
556         switch (hci_pi(sk)->channel) {
557         case HCI_CHANNEL_RAW:
558         case HCI_CHANNEL_USER:
559         case HCI_CHANNEL_CONTROL:
560                 break;
561         default:
562                 /* No message for unsupported format */
563                 return NULL;
564         }
565
566         skb = bt_skb_alloc(4, GFP_ATOMIC);
567         if (!skb)
568                 return NULL;
569
570         put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
571
572         __net_timestamp(skb);
573
574         hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
575         hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
576         if (hci_pi(sk)->hdev)
577                 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
578         else
579                 hdr->index = cpu_to_le16(HCI_DEV_NONE);
580         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
581
582         return skb;
583 }
584
585 static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
586                                                    u16 opcode, u16 len,
587                                                    const void *buf)
588 {
589         struct hci_mon_hdr *hdr;
590         struct sk_buff *skb;
591
592         skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
593         if (!skb)
594                 return NULL;
595
596         put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
597         put_unaligned_le16(opcode, skb_put(skb, 2));
598
599         if (buf)
600                 memcpy(skb_put(skb, len), buf, len);
601
602         __net_timestamp(skb);
603
604         hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
605         hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
606         hdr->index = cpu_to_le16(index);
607         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
608
609         return skb;
610 }
611
/* Queue a printf-formatted system note on monitor socket @sk. */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass: measure the formatted length */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	/* Second pass: format into the skb. vsprintf's terminating NUL
	 * lands in the extra byte reserved by the skb_put right after.
	 */
	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
643
/* Replay the registration and state history of every controller to a
 * newly attached monitor socket so it starts with a complete view.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Devices that were never opened need no further events */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Report the most advanced state the device reached */
		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
685
686 static void send_monitor_control_replay(struct sock *mon_sk)
687 {
688         struct sock *sk;
689
690         read_lock(&hci_sk_list.lock);
691
692         sk_for_each(sk, &hci_sk_list.head) {
693                 struct sk_buff *skb;
694
695                 skb = create_monitor_ctrl_open(sk);
696                 if (!skb)
697                         continue;
698
699                 if (sock_queue_rcv_skb(mon_sk, skb))
700                         kfree_skb(skb);
701         }
702
703         read_unlock(&hci_sk_list.lock);
704 }
705
/* Generate internal stack event.
 *
 * Wraps @data in an HCI_EV_STACK_INTERNAL event and feeds it through
 * the normal RAW socket delivery path as an incoming event packet.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *)skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so channel delivery rules apply */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
732
/* Propagate controller event @event (HCI_DEV_*) to interested
 * sockets: the monitor channel, legacy stack-internal listeners, and,
 * on unregistration, any socket still bound to the dying device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
772
773 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
774 {
775         struct hci_mgmt_chan *c;
776
777         list_for_each_entry(c, &mgmt_chan_list, list) {
778                 if (c->channel == channel)
779                         return c;
780         }
781
782         return NULL;
783 }
784
785 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
786 {
787         struct hci_mgmt_chan *c;
788
789         mutex_lock(&mgmt_chan_list_lock);
790         c = __hci_mgmt_chan_find(channel);
791         mutex_unlock(&mgmt_chan_list_lock);
792
793         return c;
794 }
795
796 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
797 {
798         if (c->channel < HCI_CHANNEL_CONTROL)
799                 return -EINVAL;
800
801         mutex_lock(&mgmt_chan_list_lock);
802         if (__hci_mgmt_chan_find(c->channel)) {
803                 mutex_unlock(&mgmt_chan_list_lock);
804                 return -EALREADY;
805         }
806
807         list_add_tail(&c->list, &mgmt_chan_list);
808
809         mutex_unlock(&mgmt_chan_list_lock);
810
811         return 0;
812 }
813 EXPORT_SYMBOL(hci_mgmt_chan_register);
814
/* Remove management channel @c from the registry. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
822
/* Release an HCI socket: announce the close to the monitor, drop the
 * monitor cookie, unlink from the global socket list and release any
 * bound controller.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	/* Drop anything still queued in either direction */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);
	sock_put(sk);
	return 0;
}
887
888 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
889 {
890         bdaddr_t bdaddr;
891         int err;
892
893         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
894                 return -EFAULT;
895
896         hci_dev_lock(hdev);
897
898         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
899
900         hci_dev_unlock(hdev);
901
902         return err;
903 }
904
905 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
906 {
907         bdaddr_t bdaddr;
908         int err;
909
910         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
911                 return -EFAULT;
912
913         hci_dev_lock(hdev);
914
915         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
916
917         hci_dev_unlock(hdev);
918
919         return err;
920 }
921
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	/* Controllers under exclusive user-channel access, still
	 * unconfigured, or not of the primary type cannot be managed
	 * through these ioctls.
	 */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* No longer supported; the capability check is kept so
		 * unprivileged callers still see -EPERM first.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
965
/* Main ioctl entry point for HCI sockets. Only raw channel sockets may
 * issue ioctls. Device-independent commands are dispatched without the
 * socket lock held; anything else falls through to the bound-socket
 * handler with the lock re-acquired.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* ioctls are a raw-channel-only interface */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Drop the lock: the commands below operate on global device
	 * state, not on this socket's binding.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need the socket's device binding; retake
	 * the lock before touching it.
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1059
/* Bind an HCI socket to a channel (and optionally a device). Each
 * channel has its own capability requirements and setup steps; raw,
 * user and control channels additionally notify the monitor channel
 * of the socket transition.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter addresses leave the
	 * remainder zeroed by the memset above them.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* Binding to a specific device takes a reference and
		 * puts the device into promiscuous mode.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has already been an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* The user channel needs an explicit device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* The device must not be mid-init/setup/config nor
		 * already up (unless only kept up by the auto-off
		 * grace period).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the device from the management interface while
		 * the user channel owns it.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Roll back exclusive ownership on failure */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * an user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		/* The monitor observes all devices; binding to one is
		 * not meaningful.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay current state (version notes, existing devices
		 * and open control sockets) to the new monitor.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		/* Remaining channel numbers must be registered
		 * management channels.
		 */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1355
1356 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1357                             int *addr_len, int peer)
1358 {
1359         struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1360         struct sock *sk = sock->sk;
1361         struct hci_dev *hdev;
1362         int err = 0;
1363
1364         BT_DBG("sock %p sk %p", sock, sk);
1365
1366         if (peer)
1367                 return -EOPNOTSUPP;
1368
1369         lock_sock(sk);
1370
1371         hdev = hci_hdev_from_sock(sk);
1372         if (IS_ERR(hdev)) {
1373                 err = PTR_ERR(hdev);
1374                 goto done;
1375         }
1376
1377         *addr_len = sizeof(*haddr);
1378         haddr->hci_family = AF_BLUETOOTH;
1379         haddr->hci_dev    = hdev->id;
1380         haddr->hci_channel= hci_pi(sk)->channel;
1381
1382 done:
1383         release_sock(sk);
1384         return err;
1385 }
1386
/* Attach ancillary data (control messages) to a received HCI frame,
 * according to the per-socket cmsg mask: packet direction and/or the
 * receive timestamp.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit compat callers expect a compat_timeval layout;
		 * convert before handing the timestamp to put_cmsg.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1423
/* Receive one queued frame from an HCI socket. The logging channel is
 * write-only and rejects reads. Truncated reads set MSG_TRUNC and, when
 * the caller passed MSG_TRUNC, report the full frame length.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Logging sockets only accept writes */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Remember the full length before possibly truncating the copy */
	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Ancillary data depends on the channel type */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	/* With MSG_TRUNC the caller asked for the real frame size */
	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1479
/* Parse and dispatch one management command from a sendmsg on a
 * management channel. Validates the header, mirrors control-channel
 * commands to the monitor, enforces trust and per-handler constraints,
 * then invokes the handler. Returns msglen on success or a negative
 * error (command-level failures are reported via mgmt_cmd_status and
 * still return its result).
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's payload length must exactly match what was sent */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only use handlers explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config, or owned by a user channel,
		 * are not addressable through management.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Handlers that take no device must be called without one,
	 * and vice versa.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length handlers set a minimum; fixed-length ones
	 * require an exact match.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1603
/* Handle a write on the logging channel: validate the user-supplied
 * logging packet (header, priority, ident string, NUL-terminated
 * message) and forward it to the monitor channel as a
 * HCI_MON_USER_LOGGING event. Returns len on success.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	/* Header length must account for the entire payload */
	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	/* Opcode 0x0000 is the only accepted value for user logging */
	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* A concrete index must name an existing device; the reference
	 * is only held across the forward to the monitor.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	/* Rewrite the opcode before forwarding to the monitor */
	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
1687
/* Send one frame on an HCI socket. Management channels dispatch to
 * hci_mgmt_cmd, the logging channel to hci_logging_frame; raw and user
 * channel frames are queued to the bound device. Raw-channel commands
 * are checked against the security filter unless the caller has
 * CAP_NET_RAW.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum of 4 bytes (packet type byte plus smallest header) */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		/* Handled below: frame goes to the bound device */
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is read-only */
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* Management channels are looked up under the channel
		 * list lock and handle the message themselves.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte of the frame is the packet type indicator */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Raw-channel commands must either pass the security
		 * filter or come from a CAP_NET_RAW capable caller.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		/* OGF 0x3f (vendor-specific) bypasses the command queue */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw data packets require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1827
/* Set a SOL_HCI socket option on a raw HCI socket.
 *
 * Only HCI_CHANNEL_RAW sockets accept these options; any other channel
 * gets -EBADFD.  Returns 0 on success or a negative errno.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (level != SOL_HCI)
                return -ENOPROTOOPT;

        lock_sock(sk);

        /* Options below mutate per-socket state (cmsg_mask, filter),
         * which only exists for the raw channel.
         */
        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                /* Toggle delivery of packet-direction ancillary data. */
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                /* Toggle delivery of timestamp ancillary data. */
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                /* Seed uf with the socket's current filter first, so a
                 * short copy_from_user() below leaves the fields the user
                 * did not supply at their present values instead of zero.
                 */
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                /* Accept a short write; never read past sizeof(uf). */
                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                /* Unprivileged sockets may only narrow the filter within
                 * the bounds of the static security filter.
                 */
                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                /* Commit the (possibly clamped) filter back to the socket. */
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode    = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}
1913
1914 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1915                                char __user *optval, int __user *optlen)
1916 {
1917         struct hci_ufilter uf;
1918         struct sock *sk = sock->sk;
1919         int len, opt, err = 0;
1920
1921         BT_DBG("sk %p, opt %d", sk, optname);
1922
1923         if (level != SOL_HCI)
1924                 return -ENOPROTOOPT;
1925
1926         if (get_user(len, optlen))
1927                 return -EFAULT;
1928
1929         lock_sock(sk);
1930
1931         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1932                 err = -EBADFD;
1933                 goto done;
1934         }
1935
1936         switch (optname) {
1937         case HCI_DATA_DIR:
1938                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1939                         opt = 1;
1940                 else
1941                         opt = 0;
1942
1943                 if (put_user(opt, optval))
1944                         err = -EFAULT;
1945                 break;
1946
1947         case HCI_TIME_STAMP:
1948                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1949                         opt = 1;
1950                 else
1951                         opt = 0;
1952
1953                 if (put_user(opt, optval))
1954                         err = -EFAULT;
1955                 break;
1956
1957         case HCI_FILTER:
1958                 {
1959                         struct hci_filter *f = &hci_pi(sk)->filter;
1960
1961                         memset(&uf, 0, sizeof(uf));
1962                         uf.type_mask = f->type_mask;
1963                         uf.opcode    = f->opcode;
1964                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1965                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1966                 }
1967
1968                 len = min_t(unsigned int, len, sizeof(uf));
1969                 if (copy_to_user(optval, &uf, len))
1970                         err = -EFAULT;
1971                 break;
1972
1973         default:
1974                 err = -ENOPROTOOPT;
1975                 break;
1976         }
1977
1978 done:
1979         release_sock(sk);
1980         return err;
1981 }
1982
1983 static const struct proto_ops hci_sock_ops = {
1984         .family         = PF_BLUETOOTH,
1985         .owner          = THIS_MODULE,
1986         .release        = hci_sock_release,
1987         .bind           = hci_sock_bind,
1988         .getname        = hci_sock_getname,
1989         .sendmsg        = hci_sock_sendmsg,
1990         .recvmsg        = hci_sock_recvmsg,
1991         .ioctl          = hci_sock_ioctl,
1992         .poll           = datagram_poll,
1993         .listen         = sock_no_listen,
1994         .shutdown       = sock_no_shutdown,
1995         .setsockopt     = hci_sock_setsockopt,
1996         .getsockopt     = hci_sock_getsockopt,
1997         .connect        = sock_no_connect,
1998         .socketpair     = sock_no_socketpair,
1999         .accept         = sock_no_accept,
2000         .mmap           = sock_no_mmap
2001 };
2002
/* Protocol descriptor for HCI sockets; obj_size makes sk_alloc()
 * allocate a full struct hci_pinfo (struct bt_sock embedded first).
 */
static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};
2008
2009 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2010                            int kern)
2011 {
2012         struct sock *sk;
2013
2014         BT_DBG("sock %p", sock);
2015
2016         if (sock->type != SOCK_RAW)
2017                 return -ESOCKTNOSUPPORT;
2018
2019         sock->ops = &hci_sock_ops;
2020
2021         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2022         if (!sk)
2023                 return -ENOMEM;
2024
2025         sock_init_data(sock, sk);
2026
2027         sock_reset_flag(sk, SOCK_ZAPPED);
2028
2029         sk->sk_protocol = protocol;
2030
2031         sock->state = SS_UNCONNECTED;
2032         sk->sk_state = BT_OPEN;
2033
2034         bt_sock_link(&hci_sk_list, sk);
2035         return 0;
2036 }
2037
/* Family descriptor registered for BTPROTO_HCI socket creation. */
static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};
2043
2044 int __init hci_sock_init(void)
2045 {
2046         int err;
2047
2048         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2049
2050         err = proto_register(&hci_sk_proto, 0);
2051         if (err < 0)
2052                 return err;
2053
2054         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2055         if (err < 0) {
2056                 BT_ERR("HCI socket registration failed");
2057                 goto error;
2058         }
2059
2060         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2061         if (err < 0) {
2062                 BT_ERR("Failed to create HCI proc file");
2063                 bt_sock_unregister(BTPROTO_HCI);
2064                 goto error;
2065         }
2066
2067         BT_INFO("HCI socket layer initialized");
2068
2069         return 0;
2070
2071 error:
2072         proto_unregister(&hci_sk_proto);
2073         return err;
2074 }
2075
/* Tear down the HCI socket layer in reverse order of hci_sock_init():
 * procfs entry first, then the socket family, then the proto.
 */
void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}