GNU Linux-libre 4.19.286-gnu1
[releases.git] / net / bluetooth / hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36
37 #include "mgmt_util.h"
38
static LIST_HEAD(mgmt_chan_list);		/* registered mgmt channels */
static DEFINE_MUTEX(mgmt_chan_list_lock);	/* protects mgmt_chan_list */

/* Allocator for the per-socket monitor tracing cookies */
static DEFINE_IDA(sock_cookie_ida);

/* Non-zero while at least one monitor socket is in promiscuous mode */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
45
46 /* ----- HCI socket interface ----- */
47
48 /* Socket info */
49 #define hci_pi(sk) ((struct hci_pinfo *) sk)
50
/* Per-socket state; overlays struct bt_sock so hci_pi() can cast
 * a struct sock pointer directly.
 */
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* bound controller, NULL if unbound */
	struct hci_filter filter;	/* RAW channel packet/event filter */
	__u32             cmsg_mask;	/* ancillary-data mask (consumed by the
					 * recvmsg path, not in this view)
					 */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket uses */
	unsigned long     flags;	/* HCI_SOCK_* flag bits */
	__u32             cookie;	/* monitor tracing cookie, 0 = unset */
	char              comm[TASK_COMM_LEN];	/* task name captured when the
						 * cookie was generated
						 */
};
61
62 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
63 {
64         struct hci_dev *hdev = hci_pi(sk)->hdev;
65
66         if (!hdev)
67                 return ERR_PTR(-EBADFD);
68         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
69                 return ERR_PTR(-EPIPE);
70         return hdev;
71 }
72
73 void hci_sock_set_flag(struct sock *sk, int nr)
74 {
75         set_bit(nr, &hci_pi(sk)->flags);
76 }
77
78 void hci_sock_clear_flag(struct sock *sk, int nr)
79 {
80         clear_bit(nr, &hci_pi(sk)->flags);
81 }
82
83 int hci_sock_test_flag(struct sock *sk, int nr)
84 {
85         return test_bit(nr, &hci_pi(sk)->flags);
86 }
87
88 unsigned short hci_sock_get_channel(struct sock *sk)
89 {
90         return hci_pi(sk)->channel;
91 }
92
93 u32 hci_sock_get_cookie(struct sock *sk)
94 {
95         return hci_pi(sk)->cookie;
96 }
97
98 static bool hci_sock_gen_cookie(struct sock *sk)
99 {
100         int id = hci_pi(sk)->cookie;
101
102         if (!id) {
103                 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
104                 if (id < 0)
105                         id = 0xffffffff;
106
107                 hci_pi(sk)->cookie = id;
108                 get_task_comm(hci_pi(sk)->comm, current);
109                 return true;
110         }
111
112         return false;
113 }
114
115 static void hci_sock_free_cookie(struct sock *sk)
116 {
117         int id = hci_pi(sk)->cookie;
118
119         if (id) {
120                 hci_pi(sk)->cookie = 0xffffffff;
121                 ida_simple_remove(&sock_cookie_ida, id);
122         }
123 }
124
125 static inline int hci_test_bit(int nr, const void *addr)
126 {
127         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
128 }
129
130 /* Security filter */
131 #define HCI_SFLT_MAX_OGF  5
132
/* Bitmaps restricting what an unprivileged RAW socket may exchange:
 * packet types, HCI events and, per OGF, the permitted OCF commands.
 */
struct hci_sec_filter {
	__u32 type_mask;	/* allowed H4 packet types */
	__u32 event_mask[2];	/* allowed HCI events, 64-bit bitmap */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs per OGF */
};
138
/* Default security filter: each constant is a bitmap (bit n set means
 * packet type / event / OCF n is permitted for unprivileged sockets).
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
159
/* Global list of all open HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
163
/* Apply the per-socket RAW filter to @skb.  Returns true when the
 * packet must NOT be delivered to @sk (i.e. it is filtered out).
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	/* Drop packet types the socket did not subscribe to */
	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Opcode lives at offset 3 in Command Complete and offset 4 in
	 * Command Status events
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
200
/* Broadcast @skb to every bound RAW/USER channel socket attached to
 * @hdev, prepending the H4 packet-type byte.  The private copy is
 * built lazily on the first matching socket and cloned for the rest.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this controller qualify */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			/* Honour the per-socket RAW security filter */
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel sockets only see incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* sock_queue_rcv_skb() consumes the skb only on success */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
263
264 /* Send frame to sockets with specific channel */
265 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
266                                   int flag, struct sock *skip_sk)
267 {
268         struct sock *sk;
269
270         BT_DBG("channel %u len %d", channel, skb->len);
271
272         sk_for_each(sk, &hci_sk_list.head) {
273                 struct sk_buff *nskb;
274
275                 /* Ignore socket without the flag set */
276                 if (!hci_sock_test_flag(sk, flag))
277                         continue;
278
279                 /* Skip the original socket */
280                 if (sk == skip_sk)
281                         continue;
282
283                 if (sk->sk_state != BT_BOUND)
284                         continue;
285
286                 if (hci_pi(sk)->channel != channel)
287                         continue;
288
289                 nskb = skb_clone(skb, GFP_ATOMIC);
290                 if (!nskb)
291                         continue;
292
293                 if (sock_queue_rcv_skb(sk, nskb))
294                         kfree_skb(nskb);
295         }
296
297 }
298
/* Locked wrapper around __hci_send_to_channel() for callers that do
 * not already hold hci_sk_list.lock.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
306
/* Mirror @skb to all monitor channel sockets, wrapped in a hci_mon_hdr
 * whose opcode encodes packet type and direction.  Cheap no-op while
 * no monitor is in promiscuous mode.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map H4 packet type (plus direction) to a monitor opcode */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		/* Unknown packet types are not traced */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
360
/* Mirror a management (CONTROL channel) event to monitor sockets.  One
 * skb is built per matching control socket so each copy carries that
 * socket's own cookie.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	/* MGMT_INDEX_NONE when the event is not tied to a controller */
	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* Payload: 4-byte cookie + 2-byte event code + data */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		/* Reuse the caller-supplied timestamp for every copy */
		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* hci_sk_list.lock is held: use the unlocked helper */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
414
/* Build the monitor message describing lifecycle @event of @hdev
 * (index registered/removed, opened/closed, setup done, powered up).
 * Returns NULL when the event has no monitor representation or the
 * allocation fails.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* Record format only carries the first 8 name bytes */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* No index info without a valid manufacturer */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
493
/* Build the HCI_MON_CTRL_OPEN message announcing @sk to monitors.
 * Returns NULL for sockets without a cookie or on untraced channels.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	/* Format code and version triplet depend on the channel */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	/* cookie(4) + format(2) + ver(3) + flags(4) + comm length(1) */
	skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
551
/* Build the HCI_MON_CTRL_CLOSE message for @sk, carrying only its
 * cookie.  NULL when no cookie exists or the channel is not traced.
 */
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
589
/* Build the HCI_MON_CTRL_COMMAND message mirroring a management
 * command issued on @sk: cookie(4) + opcode(2) + optional parameters.
 */
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
616
617 static void __printf(2, 3)
618 send_monitor_note(struct sock *sk, const char *fmt, ...)
619 {
620         size_t len;
621         struct hci_mon_hdr *hdr;
622         struct sk_buff *skb;
623         va_list args;
624
625         va_start(args, fmt);
626         len = vsnprintf(NULL, 0, fmt, args);
627         va_end(args);
628
629         skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
630         if (!skb)
631                 return;
632
633         va_start(args, fmt);
634         vsprintf(skb_put(skb, len), fmt, args);
635         *(u8 *)skb_put(skb, 1) = 0;
636         va_end(args);
637
638         __net_timestamp(skb);
639
640         hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
641         hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
642         hdr->index = cpu_to_le16(HCI_DEV_NONE);
643         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
644
645         if (sock_queue_rcv_skb(sk, skb))
646                 kfree_skb(skb);
647 }
648
/* Replay controller state to a newly attached monitor socket: one
 * NEW_INDEX per registered device, plus OPEN and UP/SETUP messages
 * matching its current state.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Only running devices also get an open/up replay */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
690
691 static void send_monitor_control_replay(struct sock *mon_sk)
692 {
693         struct sock *sk;
694
695         read_lock(&hci_sk_list.lock);
696
697         sk_for_each(sk, &hci_sk_list.head) {
698                 struct sk_buff *skb;
699
700                 skb = create_monitor_ctrl_open(sk);
701                 if (!skb)
702                         continue;
703
704                 if (sock_queue_rcv_skb(mon_sk, skb))
705                         kfree_skb(skb);
706         }
707
708         read_unlock(&hci_sk_list.lock);
709 }
710
/* Generate an internal stack event: build a synthetic
 * HCI_EV_STACK_INTERNAL event carrying @data and deliver it to bound
 * sockets as if it arrived from the controller.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so hci_send_to_sock() delivers it */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
737
/* Fan out a controller lifecycle @event: to the monitor channel, to
 * sockets as a stack-internal event, and on unregister wake up every
 * socket still bound to the dying device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				/* EPIPE matches what hci_hdev_from_sock()
				 * reports for an unregistering device
				 */
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
777
778 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
779 {
780         struct hci_mgmt_chan *c;
781
782         list_for_each_entry(c, &mgmt_chan_list, list) {
783                 if (c->channel == channel)
784                         return c;
785         }
786
787         return NULL;
788 }
789
790 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
791 {
792         struct hci_mgmt_chan *c;
793
794         mutex_lock(&mgmt_chan_list_lock);
795         c = __hci_mgmt_chan_find(channel);
796         mutex_unlock(&mgmt_chan_list_lock);
797
798         return c;
799 }
800
801 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
802 {
803         if (c->channel < HCI_CHANNEL_CONTROL)
804                 return -EINVAL;
805
806         mutex_lock(&mgmt_chan_list_lock);
807         if (__hci_mgmt_chan_find(c->channel)) {
808                 mutex_unlock(&mgmt_chan_list_lock);
809                 return -EALREADY;
810         }
811
812         list_add_tail(&c->list, &mgmt_chan_list);
813
814         mutex_unlock(&mgmt_chan_list_lock);
815
816         return 0;
817 }
818 EXPORT_SYMBOL(hci_mgmt_chan_register);
819
/* Remove management channel @c from the registry */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
827
/* Tear down an HCI socket: notify monitors, free the cookie, unlink
 * from the global list and drop the controller reference.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		/* Return the tracing cookie to the IDA */
		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		/* Drop the promiscuous count and device reference
		 * (presumably taken at bind time — bind path not shown
		 * in this view)
		 */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
888
889 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
890 {
891         bdaddr_t bdaddr;
892         int err;
893
894         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
895                 return -EFAULT;
896
897         hci_dev_lock(hdev);
898
899         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
900
901         hci_dev_unlock(hdev);
902
903         return err;
904 }
905
906 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
907 {
908         bdaddr_t bdaddr;
909         int err;
910
911         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
912                 return -EFAULT;
913
914         hci_dev_lock(hdev);
915
916         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
917
918         hci_dev_unlock(hdev);
919
920         return err;
921 }
922
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	/* A device claimed for exclusive user-channel access cannot be
	 * controlled through these ioctls
	 */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	/* Only primary (BR/EDR capable) controllers are supported here */
	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Not supported; keep the capability check so privileged
		 * callers get -EOPNOTSUPP and others -EPERM
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
966
/* Ioctl handler for HCI sockets.
 *
 * Only the raw channel supports ioctls.  Device-independent and
 * capability-gated device control commands are dispatched directly
 * without holding the socket lock; everything else requires a socket
 * bound to a device and is handed to hci_sock_bound_ioctl() with the
 * socket lock held.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	/* Ioctls are only valid on the raw HCI channel */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	/* These commands do not require a bound socket and are handled
	 * here directly, without holding the socket lock.
	 */
	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Everything else requires a socket bound to a device */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1095
/* Bind an HCI socket to a channel (raw, user, monitor, logging or one
 * of the registered management channels) and, where applicable, to a
 * device.  Performs the per-channel capability checks and sends the
 * corresponding open (and, on re-bind, close) notifications to the
 * monitor channel.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy only as much of the address as the caller supplied; the
	 * remainder stays zeroed.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE leaves the raw socket unbound to any
		 * particular device.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* The user channel always requires a concrete device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* The device must not be busy: not initializing, not in
		 * setup/config and not already up — unless it is only up
		 * because the HCI_AUTO_OFF grace period is still active.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Undo the claim taken above on failure */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay the current state (version notes, devices and
		 * control sockets) to the new monitor before it starts
		 * receiving live events.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1391
1392 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1393                             int peer)
1394 {
1395         struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1396         struct sock *sk = sock->sk;
1397         struct hci_dev *hdev;
1398         int err = 0;
1399
1400         BT_DBG("sock %p sk %p", sock, sk);
1401
1402         if (peer)
1403                 return -EOPNOTSUPP;
1404
1405         lock_sock(sk);
1406
1407         hdev = hci_hdev_from_sock(sk);
1408         if (IS_ERR(hdev)) {
1409                 err = PTR_ERR(hdev);
1410                 goto done;
1411         }
1412
1413         haddr->hci_family = AF_BLUETOOTH;
1414         haddr->hci_dev    = hdev->id;
1415         haddr->hci_channel= hci_pi(sk)->channel;
1416         err = sizeof(*haddr);
1417
1418 done:
1419         release_sock(sk);
1420         return err;
1421 }
1422
/* Attach the per-socket ancillary data (packet direction and/or
 * timestamp, as selected by the socket's cmsg_mask) to a message
 * being received on the raw channel.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks using 32-bit time expect the compat_timeval
		 * layout instead of the native struct timeval.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1459
/* Receive one queued packet from an HCI socket.
 *
 * Truncates to the caller's buffer (setting MSG_TRUNC) and attaches
 * channel-specific ancillary data: direction/timestamp cmsgs on the
 * raw channel, plain timestamps on user, monitor and management
 * channels.  The logging channel is write-only and rejects reads.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* The logging channel is write-only */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Remember the full packet length for MSG_TRUNC reporting */
	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	/* With MSG_TRUNC the caller asked for the real packet length */
	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1515
/* Handle a single management command received on a management channel.
 *
 * Validates the mgmt_hdr (opcode, index, length), mirrors control
 * channel commands to the monitor, performs the trust and per-device
 * state checks, then dispatches to the handler from the channel's
 * handler table.  Returns the consumed message length on success or a
 * negative error; validation failures are reported back to the sender
 * via mgmt_cmd_status() and its result is returned.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length must match the actual payload length */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue commands explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config or claimed by a user channel
		 * are not addressable through management commands.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* The handler dictates whether a device index is required */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Fixed-length commands must match exactly; variable-length
	 * commands must at least carry the fixed part.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* On success report the whole message as consumed */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1639
/* Forward a userspace logging frame (HCI_CHANNEL_LOGGING) to the
 * monitor channel after validating its format.  Returns the number of
 * consumed bytes on success or a negative error.
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	/* The length in the header must match the payload exactly */
	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	/* Opcode 0x0000 is the only accepted user logging opcode */
	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* Validate the device index if one was given; the reference is
	 * taken only to confirm the device exists and is dropped below.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	/* Rewrite the opcode before handing the frame to the monitor */
	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
1723
1724 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1725                             size_t len)
1726 {
1727         struct sock *sk = sock->sk;
1728         struct hci_mgmt_chan *chan;
1729         struct hci_dev *hdev;
1730         struct sk_buff *skb;
1731         int err;
1732
1733         BT_DBG("sock %p sk %p", sock, sk);
1734
1735         if (msg->msg_flags & MSG_OOB)
1736                 return -EOPNOTSUPP;
1737
1738         if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
1739                                MSG_CMSG_COMPAT))
1740                 return -EINVAL;
1741
1742         if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1743                 return -EINVAL;
1744
1745         lock_sock(sk);
1746
1747         switch (hci_pi(sk)->channel) {
1748         case HCI_CHANNEL_RAW:
1749         case HCI_CHANNEL_USER:
1750                 break;
1751         case HCI_CHANNEL_MONITOR:
1752                 err = -EOPNOTSUPP;
1753                 goto done;
1754         case HCI_CHANNEL_LOGGING:
1755                 err = hci_logging_frame(sk, msg, len);
1756                 goto done;
1757         default:
1758                 mutex_lock(&mgmt_chan_list_lock);
1759                 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1760                 if (chan)
1761                         err = hci_mgmt_cmd(chan, sk, msg, len);
1762                 else
1763                         err = -EINVAL;
1764
1765                 mutex_unlock(&mgmt_chan_list_lock);
1766                 goto done;
1767         }
1768
1769         hdev = hci_hdev_from_sock(sk);
1770         if (IS_ERR(hdev)) {
1771                 err = PTR_ERR(hdev);
1772                 goto done;
1773         }
1774
1775         if (!test_bit(HCI_UP, &hdev->flags)) {
1776                 err = -ENETDOWN;
1777                 goto done;
1778         }
1779
1780         skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1781         if (!skb)
1782                 goto done;
1783
1784         if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1785                 err = -EFAULT;
1786                 goto drop;
1787         }
1788
1789         hci_skb_pkt_type(skb) = skb->data[0];
1790         skb_pull(skb, 1);
1791
1792         if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1793                 /* No permission check is needed for user channel
1794                  * since that gets enforced when binding the socket.
1795                  *
1796                  * However check that the packet type is valid.
1797                  */
1798                 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1799                     hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1800                     hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1801                         err = -EINVAL;
1802                         goto drop;
1803                 }
1804
1805                 skb_queue_tail(&hdev->raw_q, skb);
1806                 queue_work(hdev->workqueue, &hdev->tx_work);
1807         } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1808                 u16 opcode = get_unaligned_le16(skb->data);
1809                 u16 ogf = hci_opcode_ogf(opcode);
1810                 u16 ocf = hci_opcode_ocf(opcode);
1811
1812                 if (((ogf > HCI_SFLT_MAX_OGF) ||
1813                      !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1814                                    &hci_sec_filter.ocf_mask[ogf])) &&
1815                     !capable(CAP_NET_RAW)) {
1816                         err = -EPERM;
1817                         goto drop;
1818                 }
1819
1820                 /* Since the opcode has already been extracted here, store
1821                  * a copy of the value for later use by the drivers.
1822                  */
1823                 hci_skb_opcode(skb) = opcode;
1824
1825                 if (ogf == 0x3f) {
1826                         skb_queue_tail(&hdev->raw_q, skb);
1827                         queue_work(hdev->workqueue, &hdev->tx_work);
1828                 } else {
1829                         /* Stand-alone HCI commands must be flagged as
1830                          * single-command requests.
1831                          */
1832                         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1833
1834                         skb_queue_tail(&hdev->cmd_q, skb);
1835                         queue_work(hdev->workqueue, &hdev->cmd_work);
1836                 }
1837         } else {
1838                 if (!capable(CAP_NET_RAW)) {
1839                         err = -EPERM;
1840                         goto drop;
1841                 }
1842
1843                 if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1844                     hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1845                         err = -EINVAL;
1846                         goto drop;
1847                 }
1848
1849                 skb_queue_tail(&hdev->raw_q, skb);
1850                 queue_work(hdev->workqueue, &hdev->tx_work);
1851         }
1852
1853         err = len;
1854
1855 done:
1856         release_sock(sk);
1857         return err;
1858
1859 drop:
1860         kfree_skb(skb);
1861         goto done;
1862 }
1863
1864 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1865                                char __user *optval, unsigned int len)
1866 {
1867         struct hci_ufilter uf = { .opcode = 0 };
1868         struct sock *sk = sock->sk;
1869         int err = 0, opt = 0;
1870
1871         BT_DBG("sk %p, opt %d", sk, optname);
1872
1873         if (level != SOL_HCI)
1874                 return -ENOPROTOOPT;
1875
1876         lock_sock(sk);
1877
1878         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1879                 err = -EBADFD;
1880                 goto done;
1881         }
1882
1883         switch (optname) {
1884         case HCI_DATA_DIR:
1885                 if (get_user(opt, (int __user *)optval)) {
1886                         err = -EFAULT;
1887                         break;
1888                 }
1889
1890                 if (opt)
1891                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1892                 else
1893                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1894                 break;
1895
1896         case HCI_TIME_STAMP:
1897                 if (get_user(opt, (int __user *)optval)) {
1898                         err = -EFAULT;
1899                         break;
1900                 }
1901
1902                 if (opt)
1903                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1904                 else
1905                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1906                 break;
1907
1908         case HCI_FILTER:
1909                 {
1910                         struct hci_filter *f = &hci_pi(sk)->filter;
1911
1912                         uf.type_mask = f->type_mask;
1913                         uf.opcode    = f->opcode;
1914                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1915                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1916                 }
1917
1918                 len = min_t(unsigned int, len, sizeof(uf));
1919                 if (copy_from_user(&uf, optval, len)) {
1920                         err = -EFAULT;
1921                         break;
1922                 }
1923
1924                 if (!capable(CAP_NET_RAW)) {
1925                         uf.type_mask &= hci_sec_filter.type_mask;
1926                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1927                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1928                 }
1929
1930                 {
1931                         struct hci_filter *f = &hci_pi(sk)->filter;
1932
1933                         f->type_mask = uf.type_mask;
1934                         f->opcode    = uf.opcode;
1935                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1936                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1937                 }
1938                 break;
1939
1940         default:
1941                 err = -ENOPROTOOPT;
1942                 break;
1943         }
1944
1945 done:
1946         release_sock(sk);
1947         return err;
1948 }
1949
1950 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1951                                char __user *optval, int __user *optlen)
1952 {
1953         struct hci_ufilter uf;
1954         struct sock *sk = sock->sk;
1955         int len, opt, err = 0;
1956
1957         BT_DBG("sk %p, opt %d", sk, optname);
1958
1959         if (level != SOL_HCI)
1960                 return -ENOPROTOOPT;
1961
1962         if (get_user(len, optlen))
1963                 return -EFAULT;
1964
1965         lock_sock(sk);
1966
1967         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1968                 err = -EBADFD;
1969                 goto done;
1970         }
1971
1972         switch (optname) {
1973         case HCI_DATA_DIR:
1974                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1975                         opt = 1;
1976                 else
1977                         opt = 0;
1978
1979                 if (put_user(opt, optval))
1980                         err = -EFAULT;
1981                 break;
1982
1983         case HCI_TIME_STAMP:
1984                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1985                         opt = 1;
1986                 else
1987                         opt = 0;
1988
1989                 if (put_user(opt, optval))
1990                         err = -EFAULT;
1991                 break;
1992
1993         case HCI_FILTER:
1994                 {
1995                         struct hci_filter *f = &hci_pi(sk)->filter;
1996
1997                         memset(&uf, 0, sizeof(uf));
1998                         uf.type_mask = f->type_mask;
1999                         uf.opcode    = f->opcode;
2000                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2001                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2002                 }
2003
2004                 len = min_t(unsigned int, len, sizeof(uf));
2005                 if (copy_to_user(optval, &uf, len))
2006                         err = -EFAULT;
2007                 break;
2008
2009         default:
2010                 err = -ENOPROTOOPT;
2011                 break;
2012         }
2013
2014 done:
2015         release_sock(sk);
2016         return err;
2017 }
2018
/* Socket destructor: free any skbs still sitting on the receive and
 * write queues when the last reference to the socket is dropped.
 */
static void hci_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
}
2024
2025 static const struct proto_ops hci_sock_ops = {
2026         .family         = PF_BLUETOOTH,
2027         .owner          = THIS_MODULE,
2028         .release        = hci_sock_release,
2029         .bind           = hci_sock_bind,
2030         .getname        = hci_sock_getname,
2031         .sendmsg        = hci_sock_sendmsg,
2032         .recvmsg        = hci_sock_recvmsg,
2033         .ioctl          = hci_sock_ioctl,
2034         .poll           = datagram_poll,
2035         .listen         = sock_no_listen,
2036         .shutdown       = sock_no_shutdown,
2037         .setsockopt     = hci_sock_setsockopt,
2038         .getsockopt     = hci_sock_getsockopt,
2039         .connect        = sock_no_connect,
2040         .socketpair     = sock_no_socketpair,
2041         .accept         = sock_no_accept,
2042         .mmap           = sock_no_mmap
2043 };
2044
/* Protocol descriptor: obj_size makes sk_alloc() reserve room for the
 * HCI-specific per-socket state (struct hci_pinfo embeds struct bt_sock).
 */
static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};
2050
2051 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2052                            int kern)
2053 {
2054         struct sock *sk;
2055
2056         BT_DBG("sock %p", sock);
2057
2058         if (sock->type != SOCK_RAW)
2059                 return -ESOCKTNOSUPPORT;
2060
2061         sock->ops = &hci_sock_ops;
2062
2063         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2064         if (!sk)
2065                 return -ENOMEM;
2066
2067         sock_init_data(sock, sk);
2068
2069         sock_reset_flag(sk, SOCK_ZAPPED);
2070
2071         sk->sk_protocol = protocol;
2072
2073         sock->state = SS_UNCONNECTED;
2074         sk->sk_state = BT_OPEN;
2075         sk->sk_destruct = hci_sock_destruct;
2076
2077         bt_sock_link(&hci_sk_list, sk);
2078         return 0;
2079 }
2080
/* PF_BLUETOOTH family hook through which the socket core instantiates
 * BTPROTO_HCI sockets via hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};
2086
2087 int __init hci_sock_init(void)
2088 {
2089         int err;
2090
2091         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2092
2093         err = proto_register(&hci_sk_proto, 0);
2094         if (err < 0)
2095                 return err;
2096
2097         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2098         if (err < 0) {
2099                 BT_ERR("HCI socket registration failed");
2100                 goto error;
2101         }
2102
2103         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2104         if (err < 0) {
2105                 BT_ERR("Failed to create HCI proc file");
2106                 bt_sock_unregister(BTPROTO_HCI);
2107                 goto error;
2108         }
2109
2110         BT_INFO("HCI socket layer initialized");
2111
2112         return 0;
2113
2114 error:
2115         proto_unregister(&hci_sk_proto);
2116         return err;
2117 }
2118
/* Tear down in the reverse order of hci_sock_init(): the proc entry
 * first, then the address family, then the protocol itself.
 */
void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}