GNU Linux-libre 4.9.309-gnu1
net/bluetooth/hci_core.c (from releases.git)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

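/* The dut_mode attribute exposes Device Under Test mode, which is used
 * for RF qualification testing. Reading returns "Y" or "N"; writing a
 * boolean either sends HCI_Enable_Device_Under_Test_Mode or resets the
 * controller to leave DUT mode, then toggles the HCI_DUT_MODE flag.
 * With debugfs mounted in the usual place, this typically appears as
 * /sys/kernel/debug/bluetooth/hci0/dut_mode (path given as an example).
 */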
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active, then there is no need for the vendor callback.
         *
         * Instead just store the desired value. If needed the setting
         * will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !test_bit(HCI_RUNNING, &hdev->flags))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * an event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        return 0;
}

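/* Run the staged initialization sequence. Stages 1 and 2 are executed
 * for every controller; stages 3 and 4 only apply to HCI_PRIMARY
 * devices, since AMP controllers are done after stage 2.
 */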
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

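/* Drop every cached inquiry entry. Each entry is linked on the "all"
 * list (and possibly on "unknown" or "resolve" as well), so freeing
 * through "all" and re-initializing the other two list heads is
 * enough to empty the whole cache.
 */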
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

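/* Re-position @ie on the resolve list: entries still waiting for a
 * name request (NAME_PENDING) stay in front, and the remainder is
 * kept ordered by ascending abs(rssi), i.e. strongest signal first,
 * so the best candidates get their names resolved earliest.
 */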
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

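/* Copy at most @num cached entries into @buf as an array of
 * struct inquiry_info and return how many were copied. Runs under
 * hdev->lock and must not sleep (see hci_inquiry() below).
 */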
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

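/* Bring the controller up: run the vendor setup and the staged init
 * sequence under the request sync lock, and on failure unwind by
 * flushing the work queues and calling the driver's close callback.
 */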
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_PRIMARY) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);

                /* Since hci_rx_work() may queue new work on cmd_work, it
                 * should be flushed first to avoid an unexpected call of
                 * hci_cmd_work().
                 */
                flush_work(&hdev->rx_work);
                flush_work(&hdev->cmd_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                if (p->conn) {
                        hci_conn_drop(p->conn);
                        hci_conn_put(p->conn);
                        p->conn = NULL;
                }
                list_del_init(&p->action);
        }

        BT_DBG("All LE pending actions cleared");
}

1536 int hci_dev_do_close(struct hci_dev *hdev)
1537 {
1538         bool auto_off;
1539
1540         BT_DBG("%s %p", hdev->name, hdev);
1541
1550         cancel_delayed_work(&hdev->power_off);
1551
1552         hci_request_cancel_all(hdev);
1553         hci_req_sync_lock(hdev);
1554
1555         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1556             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1557             test_bit(HCI_UP, &hdev->flags)) {
1558                 /* Execute vendor specific shutdown routine */
1559                 if (hdev->shutdown)
1560                         hdev->shutdown(hdev);
1561         }
1562
1563         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1564                 cancel_delayed_work_sync(&hdev->cmd_timer);
1565                 hci_req_sync_unlock(hdev);
1566                 return 0;
1567         }
1568
1569         hci_leds_update_powered(hdev, false);
1570
1571         /* Flush RX and TX works */
1572         flush_work(&hdev->tx_work);
1573         flush_work(&hdev->rx_work);
1574
1575         if (hdev->discov_timeout > 0) {
1576                 hdev->discov_timeout = 0;
1577                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1578                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1579         }
1580
1581         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1582                 cancel_delayed_work(&hdev->service_cache);
1583
1584         if (hci_dev_test_flag(hdev, HCI_MGMT))
1585                 cancel_delayed_work_sync(&hdev->rpa_expired);
1586
1587         /* Avoid potential lockdep warnings from the *_flush() calls by
1588          * ensuring the workqueue is empty up front.
1589          */
1590         drain_workqueue(hdev->workqueue);
1591
1592         hci_dev_lock(hdev);
1593
1594         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1595
1596         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1597
1598         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1599             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1600             hci_dev_test_flag(hdev, HCI_MGMT))
1601                 __mgmt_power_off(hdev);
1602
1603         hci_inquiry_cache_flush(hdev);
1604         hci_pend_le_actions_clear(hdev);
1605         hci_conn_hash_flush(hdev);
1606         hci_dev_unlock(hdev);
1607
1608         smp_unregister(hdev);
1609
1610         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1611
1612         if (hdev->flush)
1613                 hdev->flush(hdev);
1614
1615         /* Reset device */
1616         skb_queue_purge(&hdev->cmd_q);
1617         atomic_set(&hdev->cmd_cnt, 1);
1618         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1619             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1620                 set_bit(HCI_INIT, &hdev->flags);
1621                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1622                 clear_bit(HCI_INIT, &hdev->flags);
1623         }
1624
1625         /* Flush cmd work */
1626         flush_work(&hdev->cmd_work);
1627
1628         /* Drop queues */
1629         skb_queue_purge(&hdev->rx_q);
1630         skb_queue_purge(&hdev->cmd_q);
1631         skb_queue_purge(&hdev->raw_q);
1632
1633         /* Drop last sent command */
1634         if (hdev->sent_cmd) {
1635                 cancel_delayed_work_sync(&hdev->cmd_timer);
1636                 kfree_skb(hdev->sent_cmd);
1637                 hdev->sent_cmd = NULL;
1638         }
1639
1640         clear_bit(HCI_RUNNING, &hdev->flags);
1641         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1642
1643         /* After this point our queues are empty
1644          * and no tasks are scheduled. */
1645         hdev->close(hdev);
1646
1647         /* Clear flags */
1648         hdev->flags &= BIT(HCI_RAW);
1649         hci_dev_clear_volatile_flags(hdev);
1650
1651         /* Controller radio is available but is currently powered down */
1652         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1653
1654         memset(hdev->eir, 0, sizeof(hdev->eir));
1655         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1656         bacpy(&hdev->random_addr, BDADDR_ANY);
1657
1658         hci_req_sync_unlock(hdev);
1659
1660         hci_dev_put(hdev);
1661         return 0;
1662 }
1663
1664 int hci_dev_close(__u16 dev)
1665 {
1666         struct hci_dev *hdev;
1667         int err;
1668
1669         hdev = hci_dev_get(dev);
1670         if (!hdev)
1671                 return -ENODEV;
1672
1673         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1674                 err = -EBUSY;
1675                 goto done;
1676         }
1677
1678         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1679                 cancel_delayed_work(&hdev->power_off);
1680
1681         err = hci_dev_do_close(hdev);
1682
1683 done:
1684         hci_dev_put(hdev);
1685         return err;
1686 }
1687
1688 static int hci_dev_do_reset(struct hci_dev *hdev)
1689 {
1690         int ret;
1691
1692         BT_DBG("%s %p", hdev->name, hdev);
1693
1694         hci_req_sync_lock(hdev);
1695
1696         /* Drop queues */
1697         skb_queue_purge(&hdev->rx_q);
1698         skb_queue_purge(&hdev->cmd_q);
1699
1700         /* Avoid potential lockdep warnings from the *_flush() calls by
1701          * ensuring the workqueue is empty up front.
1702          */
1703         drain_workqueue(hdev->workqueue);
1704
1705         hci_dev_lock(hdev);
1706         hci_inquiry_cache_flush(hdev);
1707         hci_conn_hash_flush(hdev);
1708         hci_dev_unlock(hdev);
1709
1710         if (hdev->flush)
1711                 hdev->flush(hdev);
1712
1713         atomic_set(&hdev->cmd_cnt, 1);
1714         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1715
1716         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1717
1718         hci_req_sync_unlock(hdev);
1719         return ret;
1720 }
1721
1722 int hci_dev_reset(__u16 dev)
1723 {
1724         struct hci_dev *hdev;
1725         int err;
1726
1727         hdev = hci_dev_get(dev);
1728         if (!hdev)
1729                 return -ENODEV;
1730
1731         if (!test_bit(HCI_UP, &hdev->flags)) {
1732                 err = -ENETDOWN;
1733                 goto done;
1734         }
1735
1736         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1737                 err = -EBUSY;
1738                 goto done;
1739         }
1740
1741         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1742                 err = -EOPNOTSUPP;
1743                 goto done;
1744         }
1745
1746         err = hci_dev_do_reset(hdev);
1747
1748 done:
1749         hci_dev_put(hdev);
1750         return err;
1751 }
1752
1753 int hci_dev_reset_stat(__u16 dev)
1754 {
1755         struct hci_dev *hdev;
1756         int ret = 0;
1757
1758         hdev = hci_dev_get(dev);
1759         if (!hdev)
1760                 return -ENODEV;
1761
1762         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1763                 ret = -EBUSY;
1764                 goto done;
1765         }
1766
1767         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1768                 ret = -EOPNOTSUPP;
1769                 goto done;
1770         }
1771
1772         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1773
1774 done:
1775         hci_dev_put(hdev);
1776         return ret;
1777 }
1778
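/* Translate a legacy HCISETSCAN write_scan_enable value into mgmt
 * flags.  Per the HCI specification, SCAN_INQUIRY (0x01) corresponds
 * to the discoverable setting and SCAN_PAGE (0x02) to connectable.
 */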
1779 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1780 {
1781         bool conn_changed, discov_changed;
1782
1783         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1784
1785         if (scan & SCAN_PAGE)
1786                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1787                                                           HCI_CONNECTABLE);
1788         else
1789                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1790                                                            HCI_CONNECTABLE);
1791
1792         if (scan & SCAN_INQUIRY) {
1793                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1794                                                             HCI_DISCOVERABLE);
1795         } else {
1796                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1797                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1798                                                              HCI_DISCOVERABLE);
1799         }
1800
1801         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1802                 return;
1803
1804         if (conn_changed || discov_changed) {
1805                 /* In case this was disabled through mgmt */
1806                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1807
1808                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1809                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1810
1811                 mgmt_new_settings(hdev);
1812         }
1813 }
1814
1815 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1816 {
1817         struct hci_dev *hdev;
1818         struct hci_dev_req dr;
1819         int err = 0;
1820
1821         if (copy_from_user(&dr, arg, sizeof(dr)))
1822                 return -EFAULT;
1823
1824         hdev = hci_dev_get(dr.dev_id);
1825         if (!hdev)
1826                 return -ENODEV;
1827
1828         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1829                 err = -EBUSY;
1830                 goto done;
1831         }
1832
1833         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1834                 err = -EOPNOTSUPP;
1835                 goto done;
1836         }
1837
1838         if (hdev->dev_type != HCI_PRIMARY) {
1839                 err = -EOPNOTSUPP;
1840                 goto done;
1841         }
1842
1843         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1844                 err = -EOPNOTSUPP;
1845                 goto done;
1846         }
1847
1848         switch (cmd) {
1849         case HCISETAUTH:
1850                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1851                                    HCI_INIT_TIMEOUT, NULL);
1852                 break;
1853
1854         case HCISETENCRYPT:
1855                 if (!lmp_encrypt_capable(hdev)) {
1856                         err = -EOPNOTSUPP;
1857                         break;
1858                 }
1859
1860                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1861                         /* Auth must be enabled first */
1862                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1863                                            HCI_INIT_TIMEOUT, NULL);
1864                         if (err)
1865                                 break;
1866                 }
1867
1868                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1869                                    HCI_INIT_TIMEOUT, NULL);
1870                 break;
1871
1872         case HCISETSCAN:
1873                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1874                                    HCI_INIT_TIMEOUT, NULL);
1875
1876                 /* Ensure that the connectable and discoverable states
1877                  * get correctly modified as this was a non-mgmt change.
1878                  */
1879                 if (!err)
1880                         hci_update_scan_state(hdev, dr.dev_opt);
1881                 break;
1882
1883         case HCISETLINKPOL:
1884                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1885                                    HCI_INIT_TIMEOUT, NULL);
1886                 break;
1887
1888         case HCISETLINKMODE:
1889                 hdev->link_mode = ((__u16) dr.dev_opt) &
1890                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1891                 break;
1892
1893         case HCISETPTYPE:
1894                 hdev->pkt_type = (__u16) dr.dev_opt;
1895                 break;
1896
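        /* For the two MTU ioctls below, dev_opt carries a pair of
         * 16-bit values back to back: the first __u16 holds the packet
         * count and the second the MTU.
         */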
1897         case HCISETACLMTU:
1898                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1899                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1900                 break;
1901
1902         case HCISETSCOMTU:
1903                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1904                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1905                 break;
1906
1907         default:
1908                 err = -EINVAL;
1909                 break;
1910         }
1911
1912 done:
1913         hci_dev_put(hdev);
1914         return err;
1915 }
1916
1917 int hci_get_dev_list(void __user *arg)
1918 {
1919         struct hci_dev *hdev;
1920         struct hci_dev_list_req *dl;
1921         struct hci_dev_req *dr;
1922         int n = 0, size, err;
1923         __u16 dev_num;
1924
1925         if (get_user(dev_num, (__u16 __user *) arg))
1926                 return -EFAULT;
1927
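        /* Reject a zero count and cap the number of entries so that
         * the list allocated below stays within two pages.
         */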
1928         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1929                 return -EINVAL;
1930
1931         size = sizeof(*dl) + dev_num * sizeof(*dr);
1932
1933         dl = kzalloc(size, GFP_KERNEL);
1934         if (!dl)
1935                 return -ENOMEM;
1936
1937         dr = dl->dev_req;
1938
1939         read_lock(&hci_dev_list_lock);
1940         list_for_each_entry(hdev, &hci_dev_list, list) {
1941                 unsigned long flags = hdev->flags;
1942
1943                 /* When the auto-off is configured the transport is
1944                  * already running, but still report the device to
1945                  * userspace as being down.
1946                  */
1947                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1948                         flags &= ~BIT(HCI_UP);
1949
1950                 (dr + n)->dev_id  = hdev->id;
1951                 (dr + n)->dev_opt = flags;
1952
1953                 if (++n >= dev_num)
1954                         break;
1955         }
1956         read_unlock(&hci_dev_list_lock);
1957
1958         dl->dev_num = n;
1959         size = sizeof(*dl) + n * sizeof(*dr);
1960
1961         err = copy_to_user(arg, dl, size);
1962         kfree(dl);
1963
1964         return err ? -EFAULT : 0;
1965 }
1966
1967 int hci_get_dev_info(void __user *arg)
1968 {
1969         struct hci_dev *hdev;
1970         struct hci_dev_info di;
1971         unsigned long flags;
1972         int err = 0;
1973
1974         if (copy_from_user(&di, arg, sizeof(di)))
1975                 return -EFAULT;
1976
1977         hdev = hci_dev_get(di.dev_id);
1978         if (!hdev)
1979                 return -ENODEV;
1980
1981         /* When the auto-off is configured the transport is
1982          * already running, but still report the device to
1983          * userspace as being down.
1984          */
1985         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1986                 flags = hdev->flags & ~BIT(HCI_UP);
1987         else
1988                 flags = hdev->flags;
1989
1990         strcpy(di.name, hdev->name);
1991         di.bdaddr   = hdev->bdaddr;
1992         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1993         di.flags    = flags;
1994         di.pkt_type = hdev->pkt_type;
1995         if (lmp_bredr_capable(hdev)) {
1996                 di.acl_mtu  = hdev->acl_mtu;
1997                 di.acl_pkts = hdev->acl_pkts;
1998                 di.sco_mtu  = hdev->sco_mtu;
1999                 di.sco_pkts = hdev->sco_pkts;
2000         } else {
2001                 di.acl_mtu  = hdev->le_mtu;
2002                 di.acl_pkts = hdev->le_pkts;
2003                 di.sco_mtu  = 0;
2004                 di.sco_pkts = 0;
2005         }
2006         di.link_policy = hdev->link_policy;
2007         di.link_mode   = hdev->link_mode;
2008
2009         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2010         memcpy(&di.features, &hdev->features, sizeof(di.features));
2011
2012         if (copy_to_user(arg, &di, sizeof(di)))
2013                 err = -EFAULT;
2014
2015         hci_dev_put(hdev);
2016
2017         return err;
2018 }
2019
2020 /* ---- Interface to HCI drivers ---- */
2021
2022 static int hci_rfkill_set_block(void *data, bool blocked)
2023 {
2024         struct hci_dev *hdev = data;
2025
2026         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2027
2028         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2029                 return -EBUSY;
2030
2031         if (blocked) {
2032                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2033                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2034                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2035                         hci_dev_do_close(hdev);
2036         } else {
2037                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2038         }
2039
2040         return 0;
2041 }
2042
2043 static const struct rfkill_ops hci_rfkill_ops = {
2044         .set_block = hci_rfkill_set_block,
2045 };
2046
2047 static void hci_power_on(struct work_struct *work)
2048 {
2049         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2050         int err;
2051
2052         BT_DBG("%s", hdev->name);
2053
2054         if (test_bit(HCI_UP, &hdev->flags) &&
2055             hci_dev_test_flag(hdev, HCI_MGMT) &&
2056             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2057                 cancel_delayed_work(&hdev->power_off);
2058                 hci_req_sync_lock(hdev);
2059                 err = __hci_req_hci_power_on(hdev);
2060                 hci_req_sync_unlock(hdev);
2061                 mgmt_power_on(hdev, err);
2062                 return;
2063         }
2064
2065         err = hci_dev_do_open(hdev);
2066         if (err < 0) {
2067                 hci_dev_lock(hdev);
2068                 mgmt_set_powered_failed(hdev, err);
2069                 hci_dev_unlock(hdev);
2070                 return;
2071         }
2072
2073         /* During the HCI setup phase, a few error conditions are
2074          * ignored and they need to be checked now. If they are still
2075          * valid, it is important to turn the device back off.
2076          */
2077         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2078             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2079             (hdev->dev_type == HCI_PRIMARY &&
2080              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2081              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2082                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2083                 hci_dev_do_close(hdev);
2084         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2085                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2086                                    HCI_AUTO_OFF_TIMEOUT);
2087         }
2088
2089         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2090                 /* For unconfigured devices, set the HCI_RAW flag
2091                  * so that userspace can easily identify them.
2092                  */
2093                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2094                         set_bit(HCI_RAW, &hdev->flags);
2095
2096                 /* For fully configured devices, this will send
2097                  * the Index Added event. For unconfigured devices,
2098                  * it will send the Unconfigured Index Added event.
2099                  *
2100                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2101                  * and no event will be sent.
2102                  */
2103                 mgmt_index_added(hdev);
2104         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2105                 /* When the controller is now configured, then it
2106                  * is important to clear the HCI_RAW flag.
2107                  */
2108                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2109                         clear_bit(HCI_RAW, &hdev->flags);
2110
2111                 /* Powering on the controller with HCI_CONFIG set only
2112                  * happens with the transition from unconfigured to
2113                  * configured. This will send the Index Added event.
2114                  */
2115                 mgmt_index_added(hdev);
2116         }
2117 }
2118
2119 static void hci_power_off(struct work_struct *work)
2120 {
2121         struct hci_dev *hdev = container_of(work, struct hci_dev,
2122                                             power_off.work);
2123
2124         BT_DBG("%s", hdev->name);
2125
2126         hci_dev_do_close(hdev);
2127 }
2128
2129 static void hci_error_reset(struct work_struct *work)
2130 {
2131         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2132
2133         BT_DBG("%s", hdev->name);
2134
2135         if (hdev->hw_error)
2136                 hdev->hw_error(hdev, hdev->hw_error_code);
2137         else
2138                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2139                        hdev->hw_error_code);
2140
2141         if (hci_dev_do_close(hdev))
2142                 return;
2143
2144         hci_dev_do_open(hdev);
2145 }
2146
2147 void hci_uuids_clear(struct hci_dev *hdev)
2148 {
2149         struct bt_uuid *uuid, *tmp;
2150
2151         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2152                 list_del(&uuid->list);
2153                 kfree(uuid);
2154         }
2155 }
2156
2157 void hci_link_keys_clear(struct hci_dev *hdev)
2158 {
2159         struct link_key *key;
2160
2161         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2162                 list_del_rcu(&key->list);
2163                 kfree_rcu(key, rcu);
2164         }
2165 }
2166
2167 void hci_smp_ltks_clear(struct hci_dev *hdev)
2168 {
2169         struct smp_ltk *k;
2170
2171         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2172                 list_del_rcu(&k->list);
2173                 kfree_rcu(k, rcu);
2174         }
2175 }
2176
2177 void hci_smp_irks_clear(struct hci_dev *hdev)
2178 {
2179         struct smp_irk *k;
2180
2181         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2182                 list_del_rcu(&k->list);
2183                 kfree_rcu(k, rcu);
2184         }
2185 }
2186
2187 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2188 {
2189         struct link_key *k;
2190
2191         rcu_read_lock();
2192         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2193                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2194                         rcu_read_unlock();
2195                         return k;
2196                 }
2197         }
2198         rcu_read_unlock();
2199
2200         return NULL;
2201 }
2202
2203 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2204                                u8 key_type, u8 old_key_type)
2205 {
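        /* Authentication requirement values exchanged during pairing
         * (per the Bluetooth core specification): 0x00/0x01 mean no
         * bonding, 0x02/0x03 dedicated bonding and 0x04/0x05 general
         * bonding, where the odd values additionally require MITM
         * protection.
         */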
2206         /* Legacy key */
2207         if (key_type < 0x03)
2208                 return true;
2209
2210         /* Debug keys are insecure so don't store them persistently */
2211         if (key_type == HCI_LK_DEBUG_COMBINATION)
2212                 return false;
2213
2214         /* Changed combination key and there's no previous one */
2215         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2216                 return false;
2217
2218         /* Security mode 3 case */
2219         if (!conn)
2220                 return true;
2221
2222         /* BR/EDR key derived using SC from an LE link */
2223         if (conn->type == LE_LINK)
2224                 return true;
2225
2226         /* Neither local nor remote side requested no-bonding */
2227         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2228                 return true;
2229
2230         /* Local side had dedicated bonding as requirement */
2231         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2232                 return true;
2233
2234         /* Remote side had dedicated bonding as requirement */
2235         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2236                 return true;
2237
2238         /* If none of the above criteria match, then don't store the key
2239          * persistently */
2240         return false;
2241 }
2242
2243 static u8 ltk_role(u8 type)
2244 {
2245         if (type == SMP_LTK)
2246                 return HCI_ROLE_MASTER;
2247
2248         return HCI_ROLE_SLAVE;
2249 }
2250
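/* Look up an LTK for the given address and role.  Legacy SMP keys are
 * role specific and only match when the stored key's role agrees with
 * the requested one; Secure Connections LTKs are symmetric and match
 * for either role.
 */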
2251 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2252                              u8 addr_type, u8 role)
2253 {
2254         struct smp_ltk *k;
2255
2256         rcu_read_lock();
2257         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2258                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2259                         continue;
2260
2261                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2262                         rcu_read_unlock();
2263                         return k;
2264                 }
2265         }
2266         rcu_read_unlock();
2267
2268         return NULL;
2269 }
2270
2271 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2272 {
2273         struct smp_irk *irk;
2274
2275         rcu_read_lock();
2276         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2277                 if (!bacmp(&irk->rpa, rpa)) {
2278                         rcu_read_unlock();
2279                         return irk;
2280                 }
2281         }
2282
2283         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2284                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2285                         bacpy(&irk->rpa, rpa);
2286                         rcu_read_unlock();
2287                         return irk;
2288                 }
2289         }
2290         rcu_read_unlock();
2291
2292         return NULL;
2293 }
2294
2295 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2296                                      u8 addr_type)
2297 {
2298         struct smp_irk *irk;
2299
2300         /* Identity Address must be public or static random */
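        /* bdaddr_t stores addresses little-endian, so b[5] holds the
         * most significant byte; a static random address must have its
         * two top bits set to 0b11.
         */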
2301         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2302                 return NULL;
2303
2304         rcu_read_lock();
2305         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2306                 if (addr_type == irk->addr_type &&
2307                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2308                         rcu_read_unlock();
2309                         return irk;
2310                 }
2311         }
2312         rcu_read_unlock();
2313
2314         return NULL;
2315 }
2316
2317 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2318                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2319                                   u8 pin_len, bool *persistent)
2320 {
2321         struct link_key *key, *old_key;
2322         u8 old_key_type;
2323
2324         old_key = hci_find_link_key(hdev, bdaddr);
2325         if (old_key) {
2326                 old_key_type = old_key->type;
2327                 key = old_key;
2328         } else {
2329                 old_key_type = conn ? conn->key_type : 0xff;
2330                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2331                 if (!key)
2332                         return NULL;
2333                 list_add_rcu(&key->list, &hdev->link_keys);
2334         }
2335
2336         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2337
2338         /* Some buggy controller combinations generate a changed
2339          * combination key for legacy pairing even when there's no
2340          * previous key */
2341         if (type == HCI_LK_CHANGED_COMBINATION &&
2342             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2343                 type = HCI_LK_COMBINATION;
2344                 if (conn)
2345                         conn->key_type = type;
2346         }
2347
2348         bacpy(&key->bdaddr, bdaddr);
2349         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2350         key->pin_len = pin_len;
2351
2352         if (type == HCI_LK_CHANGED_COMBINATION)
2353                 key->type = old_key_type;
2354         else
2355                 key->type = type;
2356
2357         if (persistent)
2358                 *persistent = hci_persistent_key(hdev, conn, type,
2359                                                  old_key_type);
2360
2361         return key;
2362 }
2363
2364 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2365                             u8 addr_type, u8 type, u8 authenticated,
2366                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2367 {
2368         struct smp_ltk *key, *old_key;
2369         u8 role = ltk_role(type);
2370
2371         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2372         if (old_key)
2373                 key = old_key;
2374         else {
2375                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2376                 if (!key)
2377                         return NULL;
2378                 list_add_rcu(&key->list, &hdev->long_term_keys);
2379         }
2380
2381         bacpy(&key->bdaddr, bdaddr);
2382         key->bdaddr_type = addr_type;
2383         memcpy(key->val, tk, sizeof(key->val));
2384         key->authenticated = authenticated;
2385         key->ediv = ediv;
2386         key->rand = rand;
2387         key->enc_size = enc_size;
2388         key->type = type;
2389
2390         return key;
2391 }
2392
2393 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2394                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2395 {
2396         struct smp_irk *irk;
2397
2398         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2399         if (!irk) {
2400                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2401                 if (!irk)
2402                         return NULL;
2403
2404                 bacpy(&irk->bdaddr, bdaddr);
2405                 irk->addr_type = addr_type;
2406
2407                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2408         }
2409
2410         memcpy(irk->val, val, 16);
2411         bacpy(&irk->rpa, rpa);
2412
2413         return irk;
2414 }
2415
2416 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2417 {
2418         struct link_key *key;
2419
2420         key = hci_find_link_key(hdev, bdaddr);
2421         if (!key)
2422                 return -ENOENT;
2423
2424         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2425
2426         list_del_rcu(&key->list);
2427         kfree_rcu(key, rcu);
2428
2429         return 0;
2430 }
2431
2432 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2433 {
2434         struct smp_ltk *k;
2435         int removed = 0;
2436
2437         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2438                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2439                         continue;
2440
2441                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2442
2443                 list_del_rcu(&k->list);
2444                 kfree_rcu(k, rcu);
2445                 removed++;
2446         }
2447
2448         return removed ? 0 : -ENOENT;
2449 }
2450
2451 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2452 {
2453         struct smp_irk *k;
2454
2455         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2456                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2457                         continue;
2458
2459                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2460
2461                 list_del_rcu(&k->list);
2462                 kfree_rcu(k, rcu);
2463         }
2464 }
2465
2466 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2467 {
2468         struct smp_ltk *k;
2469         struct smp_irk *irk;
2470         u8 addr_type;
2471
2472         if (type == BDADDR_BREDR) {
2473                 if (hci_find_link_key(hdev, bdaddr))
2474                         return true;
2475                 return false;
2476         }
2477
2478         /* Convert to HCI addr type which struct smp_ltk uses */
2479         if (type == BDADDR_LE_PUBLIC)
2480                 addr_type = ADDR_LE_DEV_PUBLIC;
2481         else
2482                 addr_type = ADDR_LE_DEV_RANDOM;
2483
2484         irk = hci_get_irk(hdev, bdaddr, addr_type);
2485         if (irk) {
2486                 bdaddr = &irk->bdaddr;
2487                 addr_type = irk->addr_type;
2488         }
2489
2490         rcu_read_lock();
2491         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2492                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2493                         rcu_read_unlock();
2494                         return true;
2495                 }
2496         }
2497         rcu_read_unlock();
2498
2499         return false;
2500 }
2501
2502 /* HCI command timer function */
2503 static void hci_cmd_timeout(struct work_struct *work)
2504 {
2505         struct hci_dev *hdev = container_of(work, struct hci_dev,
2506                                             cmd_timer.work);
2507
2508         if (hdev->sent_cmd) {
2509                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2510                 u16 opcode = __le16_to_cpu(sent->opcode);
2511
2512                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2513         } else {
2514                 BT_ERR("%s command tx timeout", hdev->name);
2515         }
2516
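        /* Reset the command credit so the next queued command can be
         * sent and kick cmd_work to get the command queue moving
         * again.
         */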
2517         atomic_set(&hdev->cmd_cnt, 1);
2518         queue_work(hdev->workqueue, &hdev->cmd_work);
2519 }
2520
2521 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2522                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2523 {
2524         struct oob_data *data;
2525
2526         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2527                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2528                         continue;
2529                 if (data->bdaddr_type != bdaddr_type)
2530                         continue;
2531                 return data;
2532         }
2533
2534         return NULL;
2535 }
2536
2537 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2538                                u8 bdaddr_type)
2539 {
2540         struct oob_data *data;
2541
2542         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2543         if (!data)
2544                 return -ENOENT;
2545
2546         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2547
2548         list_del(&data->list);
2549         kfree(data);
2550
2551         return 0;
2552 }
2553
2554 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2555 {
2556         struct oob_data *data, *n;
2557
2558         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2559                 list_del(&data->list);
2560                 kfree(data);
2561         }
2562 }
2563
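/* The present field encodes which OOB values are valid, following the
 * assignments below: 0x00 none, 0x01 P-192 values only, 0x02 P-256
 * values only, 0x03 both the P-192 and P-256 value pairs.
 */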
2564 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2565                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2566                             u8 *hash256, u8 *rand256)
2567 {
2568         struct oob_data *data;
2569
2570         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2571         if (!data) {
2572                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2573                 if (!data)
2574                         return -ENOMEM;
2575
2576                 bacpy(&data->bdaddr, bdaddr);
2577                 data->bdaddr_type = bdaddr_type;
2578                 list_add(&data->list, &hdev->remote_oob_data);
2579         }
2580
2581         if (hash192 && rand192) {
2582                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2583                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2584                 if (hash256 && rand256)
2585                         data->present = 0x03;
2586         } else {
2587                 memset(data->hash192, 0, sizeof(data->hash192));
2588                 memset(data->rand192, 0, sizeof(data->rand192));
2589                 if (hash256 && rand256)
2590                         data->present = 0x02;
2591                 else
2592                         data->present = 0x00;
2593         }
2594
2595         if (hash256 && rand256) {
2596                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2597                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2598         } else {
2599                 memset(data->hash256, 0, sizeof(data->hash256));
2600                 memset(data->rand256, 0, sizeof(data->rand256));
2601                 if (hash192 && rand192)
2602                         data->present = 0x01;
2603         }
2604
2605         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2606
2607         return 0;
2608 }
2609
2610 /* This function requires the caller holds hdev->lock */
2611 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2612 {
2613         struct adv_info *adv_instance;
2614
2615         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2616                 if (adv_instance->instance == instance)
2617                         return adv_instance;
2618         }
2619
2620         return NULL;
2621 }
2622
2623 /* This function requires the caller holds hdev->lock */
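/* Return the advertising instance that follows @instance in the list,
 * wrapping around to the first entry after the last one, or NULL when
 * @instance itself cannot be found.
 */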
2624 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2625 {
2626         struct adv_info *cur_instance;
2627
2628         cur_instance = hci_find_adv_instance(hdev, instance);
2629         if (!cur_instance)
2630                 return NULL;
2631
2632         if (cur_instance == list_last_entry(&hdev->adv_instances,
2633                                             struct adv_info, list))
2634                 return list_first_entry(&hdev->adv_instances,
2635                                         struct adv_info, list);
2636         else
2637                 return list_next_entry(cur_instance, list);
2638 }
2639
2640 /* This function requires the caller holds hdev->lock */
2641 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2642 {
2643         struct adv_info *adv_instance;
2644
2645         adv_instance = hci_find_adv_instance(hdev, instance);
2646         if (!adv_instance)
2647                 return -ENOENT;
2648
2649         BT_DBG("%s removing instance %d", hdev->name, instance);
2650
2651         if (hdev->cur_adv_instance == instance) {
2652                 if (hdev->adv_instance_timeout) {
2653                         cancel_delayed_work(&hdev->adv_instance_expire);
2654                         hdev->adv_instance_timeout = 0;
2655                 }
2656                 hdev->cur_adv_instance = 0x00;
2657         }
2658
2659         list_del(&adv_instance->list);
2660         kfree(adv_instance);
2661
2662         hdev->adv_instance_cnt--;
2663
2664         return 0;
2665 }
2666
2667 /* This function requires the caller holds hdev->lock */
2668 void hci_adv_instances_clear(struct hci_dev *hdev)
2669 {
2670         struct adv_info *adv_instance, *n;
2671
2672         if (hdev->adv_instance_timeout) {
2673                 cancel_delayed_work(&hdev->adv_instance_expire);
2674                 hdev->adv_instance_timeout = 0;
2675         }
2676
2677         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2678                 list_del(&adv_instance->list);
2679                 kfree(adv_instance);
2680         }
2681
2682         hdev->adv_instance_cnt = 0;
2683         hdev->cur_adv_instance = 0x00;
2684 }
2685
2686 /* This function requires the caller holds hdev->lock */
2687 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2688                          u16 adv_data_len, u8 *adv_data,
2689                          u16 scan_rsp_len, u8 *scan_rsp_data,
2690                          u16 timeout, u16 duration)
2691 {
2692         struct adv_info *adv_instance;
2693
2694         adv_instance = hci_find_adv_instance(hdev, instance);
2695         if (adv_instance) {
2696                 memset(adv_instance->adv_data, 0,
2697                        sizeof(adv_instance->adv_data));
2698                 memset(adv_instance->scan_rsp_data, 0,
2699                        sizeof(adv_instance->scan_rsp_data));
2700         } else {
2701                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2702                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2703                         return -EOVERFLOW;
2704
2705                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2706                 if (!adv_instance)
2707                         return -ENOMEM;
2708
2709                 adv_instance->pending = true;
2710                 adv_instance->instance = instance;
2711                 list_add(&adv_instance->list, &hdev->adv_instances);
2712                 hdev->adv_instance_cnt++;
2713         }
2714
2715         adv_instance->flags = flags;
2716         adv_instance->adv_data_len = adv_data_len;
2717         adv_instance->scan_rsp_len = scan_rsp_len;
2718
2719         if (adv_data_len)
2720                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2721
2722         if (scan_rsp_len)
2723                 memcpy(adv_instance->scan_rsp_data,
2724                        scan_rsp_data, scan_rsp_len);
2725
2726         adv_instance->timeout = timeout;
2727         adv_instance->remaining_time = timeout;
2728
2729         if (duration == 0)
2730                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2731         else
2732                 adv_instance->duration = duration;
2733
2734         BT_DBG("%s for instance %d", hdev->name, instance);
2735
2736         return 0;
2737 }
2738
2739 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2740                                          bdaddr_t *bdaddr, u8 type)
2741 {
2742         struct bdaddr_list *b;
2743
2744         list_for_each_entry(b, bdaddr_list, list) {
2745                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2746                         return b;
2747         }
2748
2749         return NULL;
2750 }
2751
2752 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2753 {
2754         struct bdaddr_list *b, *n;
2755
2756         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2757                 list_del(&b->list);
2758                 kfree(b);
2759         }
2760 }
2761
2762 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2763 {
2764         struct bdaddr_list *entry;
2765
2766         if (!bacmp(bdaddr, BDADDR_ANY))
2767                 return -EBADF;
2768
2769         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2770                 return -EEXIST;
2771
2772         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2773         if (!entry)
2774                 return -ENOMEM;
2775
2776         bacpy(&entry->bdaddr, bdaddr);
2777         entry->bdaddr_type = type;
2778
2779         list_add(&entry->list, list);
2780
2781         return 0;
2782 }
2783
2784 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2785 {
2786         struct bdaddr_list *entry;
2787
2788         if (!bacmp(bdaddr, BDADDR_ANY)) {
2789                 hci_bdaddr_list_clear(list);
2790                 return 0;
2791         }
2792
2793         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2794         if (!entry)
2795                 return -ENOENT;
2796
2797         list_del(&entry->list);
2798         kfree(entry);
2799
2800         return 0;
2801 }
2802
2803 /* This function requires the caller holds hdev->lock */
2804 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2805                                                bdaddr_t *addr, u8 addr_type)
2806 {
2807         struct hci_conn_params *params;
2808
2809         list_for_each_entry(params, &hdev->le_conn_params, list) {
2810                 if (bacmp(&params->addr, addr) == 0 &&
2811                     params->addr_type == addr_type) {
2812                         return params;
2813                 }
2814         }
2815
2816         return NULL;
2817 }
2818
2819 /* This function requires the caller holds hdev->lock */
2820 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2821                                                   bdaddr_t *addr, u8 addr_type)
2822 {
2823         struct hci_conn_params *param;
2824
2825         list_for_each_entry(param, list, action) {
2826                 if (bacmp(&param->addr, addr) == 0 &&
2827                     param->addr_type == addr_type)
2828                         return param;
2829         }
2830
2831         return NULL;
2832 }
2833
2834 /* This function requires the caller holds hdev->lock */
2835 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2836                                             bdaddr_t *addr, u8 addr_type)
2837 {
2838         struct hci_conn_params *params;
2839
2840         params = hci_conn_params_lookup(hdev, addr, addr_type);
2841         if (params)
2842                 return params;
2843
2844         params = kzalloc(sizeof(*params), GFP_KERNEL);
2845         if (!params) {
2846                 BT_ERR("Out of memory");
2847                 return NULL;
2848         }
2849
2850         bacpy(&params->addr, addr);
2851         params->addr_type = addr_type;
2852
2853         list_add(&params->list, &hdev->le_conn_params);
2854         INIT_LIST_HEAD(&params->action);
2855
2856         params->conn_min_interval = hdev->le_conn_min_interval;
2857         params->conn_max_interval = hdev->le_conn_max_interval;
2858         params->conn_latency = hdev->le_conn_latency;
2859         params->supervision_timeout = hdev->le_supv_timeout;
2860         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2861
2862         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2863
2864         return params;
2865 }
2866
2867 static void hci_conn_params_free(struct hci_conn_params *params)
2868 {
2869         if (params->conn) {
2870                 hci_conn_drop(params->conn);
2871                 hci_conn_put(params->conn);
2872         }
2873
2874         list_del(&params->action);
2875         list_del(&params->list);
2876         kfree(params);
2877 }
2878
2879 /* This function requires the caller holds hdev->lock */
2880 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2881 {
2882         struct hci_conn_params *params;
2883
2884         params = hci_conn_params_lookup(hdev, addr, addr_type);
2885         if (!params)
2886                 return;
2887
2888         hci_conn_params_free(params);
2889
2890         hci_update_background_scan(hdev);
2891
2892         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2893 }
2894
2895 /* This function requires the caller holds hdev->lock */
2896 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2897 {
2898         struct hci_conn_params *params, *tmp;
2899
2900         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2901                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2902                         continue;
2903
2904                 /* If trying to establish a one-time connection to a disabled
2905                  * device, leave the params, but mark them as just once.
2906                  */
2907                 if (params->explicit_connect) {
2908                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2909                         continue;
2910                 }
2911
2912                 list_del(&params->list);
2913                 kfree(params);
2914         }
2915
2916         BT_DBG("All LE disabled connection parameters were removed");
2917 }
2918
2919 /* This function requires the caller holds hdev->lock */
2920 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2921 {
2922         struct hci_conn_params *params, *tmp;
2923
2924         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2925                 hci_conn_params_free(params);
2926
2927         BT_DBG("All LE connection parameters were removed");
2928 }
2929
2930 /* Copy the Identity Address of the controller.
2931  *
2932  * If the controller has a public BD_ADDR, then by default use that one.
2933  * If this is an LE-only controller without a public address, default to
2934  * the static random address.
2935  *
2936  * For debugging purposes it is possible to force controllers with a
2937  * public address to use the static random address instead.
2938  *
2939  * In case BR/EDR has been disabled on a dual-mode controller and
2940  * userspace has configured a static address, then that address
2941  * becomes the identity address instead of the public BR/EDR address.
2942  */
2943 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2944                                u8 *bdaddr_type)
2945 {
2946         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2947             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2948             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2949              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2950                 bacpy(bdaddr, &hdev->static_addr);
2951                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2952         } else {
2953                 bacpy(bdaddr, &hdev->bdaddr);
2954                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2955         }
2956 }
2957
2958 /* Alloc HCI device */
2959 struct hci_dev *hci_alloc_dev(void)
2960 {
2961         struct hci_dev *hdev;
2962
2963         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2964         if (!hdev)
2965                 return NULL;
2966
2967         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2968         hdev->esco_type = (ESCO_HV1);
2969         hdev->link_mode = (HCI_LM_ACCEPT);
2970         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2971         hdev->io_capability = 0x03;     /* No Input No Output */
2972         hdev->manufacturer = 0xffff;    /* Default to internal use */
2973         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2974         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2975         hdev->adv_instance_cnt = 0;
2976         hdev->cur_adv_instance = 0x00;
2977         hdev->adv_instance_timeout = 0;
2978
2979         hdev->sniff_max_interval = 800;
2980         hdev->sniff_min_interval = 80;
2981
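        /* LE defaults, expressed in controller units: scan and
         * advertising intervals use 0.625 ms units (0x0060 = 60 ms,
         * 0x0030 = 30 ms, 0x0800 = 1.28 s), connection intervals use
         * 1.25 ms units (0x0028 = 50 ms, 0x0038 = 70 ms) and the
         * supervision timeout uses 10 ms units (0x002a = 420 ms).
         */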
2982         hdev->le_adv_channel_map = 0x07;
2983         hdev->le_adv_min_interval = 0x0800;
2984         hdev->le_adv_max_interval = 0x0800;
2985         hdev->le_scan_interval = 0x0060;
2986         hdev->le_scan_window = 0x0030;
2987         hdev->le_conn_min_interval = 0x0028;
2988         hdev->le_conn_max_interval = 0x0038;
2989         hdev->le_conn_latency = 0x0000;
2990         hdev->le_supv_timeout = 0x002a;
2991         hdev->le_def_tx_len = 0x001b;
2992         hdev->le_def_tx_time = 0x0148;
2993         hdev->le_max_tx_len = 0x001b;
2994         hdev->le_max_tx_time = 0x0148;
2995         hdev->le_max_rx_len = 0x001b;
2996         hdev->le_max_rx_time = 0x0148;
2997
2998         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2999         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3000         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3001         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3002
3003         mutex_init(&hdev->lock);
3004         mutex_init(&hdev->req_lock);
3005
3006         INIT_LIST_HEAD(&hdev->mgmt_pending);
3007         INIT_LIST_HEAD(&hdev->blacklist);
3008         INIT_LIST_HEAD(&hdev->whitelist);
3009         INIT_LIST_HEAD(&hdev->uuids);
3010         INIT_LIST_HEAD(&hdev->link_keys);
3011         INIT_LIST_HEAD(&hdev->long_term_keys);
3012         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3013         INIT_LIST_HEAD(&hdev->remote_oob_data);
3014         INIT_LIST_HEAD(&hdev->le_white_list);
3015         INIT_LIST_HEAD(&hdev->le_conn_params);
3016         INIT_LIST_HEAD(&hdev->pend_le_conns);
3017         INIT_LIST_HEAD(&hdev->pend_le_reports);
3018         INIT_LIST_HEAD(&hdev->conn_hash.list);
3019         INIT_LIST_HEAD(&hdev->adv_instances);
3020
3021         INIT_WORK(&hdev->rx_work, hci_rx_work);
3022         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3023         INIT_WORK(&hdev->tx_work, hci_tx_work);
3024         INIT_WORK(&hdev->power_on, hci_power_on);
3025         INIT_WORK(&hdev->error_reset, hci_error_reset);
3026
3027         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3028
3029         skb_queue_head_init(&hdev->rx_q);
3030         skb_queue_head_init(&hdev->cmd_q);
3031         skb_queue_head_init(&hdev->raw_q);
3032
3033         init_waitqueue_head(&hdev->req_wait_q);
3034
3035         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3036
3037         hci_request_setup(hdev);
3038
3039         hci_init_sysfs(hdev);
3040         discovery_init(hdev);
3041
3042         return hdev;
3043 }
3044 EXPORT_SYMBOL(hci_alloc_dev);
3045
3046 /* Free HCI device */
3047 void hci_free_dev(struct hci_dev *hdev)
3048 {
3049         /* will free via device release */
3050         put_device(&hdev->dev);
3051 }
3052 EXPORT_SYMBOL(hci_free_dev);
3053
3054 /* Register HCI device */
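/* A minimal transport driver sketch (the foo_* callbacks and priv are
 * illustrative only, not part of this file):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send_frame;
 *	hci_set_drvdata(hdev, priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */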
3055 int hci_register_dev(struct hci_dev *hdev)
3056 {
3057         int id, error;
3058
3059         if (!hdev->open || !hdev->close || !hdev->send)
3060                 return -EINVAL;
3061
3062         /* Do not allow HCI_AMP devices to register at index 0,
3063          * so the index can be used as the AMP controller ID.
3064          */
3065         switch (hdev->dev_type) {
3066         case HCI_PRIMARY:
3067                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3068                 break;
3069         case HCI_AMP:
3070                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3071                 break;
3072         default:
3073                 return -EINVAL;
3074         }
3075
3076         if (id < 0)
3077                 return id;
3078
3079         sprintf(hdev->name, "hci%d", id);
3080         hdev->id = id;
3081
3082         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3083
3084         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3085                                           WQ_MEM_RECLAIM, 1, hdev->name);
3086         if (!hdev->workqueue) {
3087                 error = -ENOMEM;
3088                 goto err;
3089         }
3090
3091         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3092                                               WQ_MEM_RECLAIM, 1, hdev->name);
3093         if (!hdev->req_workqueue) {
3094                 destroy_workqueue(hdev->workqueue);
3095                 error = -ENOMEM;
3096                 goto err;
3097         }
3098
3099         if (!IS_ERR_OR_NULL(bt_debugfs))
3100                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3101
3102         dev_set_name(&hdev->dev, "%s", hdev->name);
3103
3104         error = device_add(&hdev->dev);
3105         if (error < 0)
3106                 goto err_wqueue;
3107
3108         hci_leds_init(hdev);
3109
3110         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3111                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3112                                     hdev);
3113         if (hdev->rfkill) {
3114                 if (rfkill_register(hdev->rfkill) < 0) {
3115                         rfkill_destroy(hdev->rfkill);
3116                         hdev->rfkill = NULL;
3117                 }
3118         }
3119
3120         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3121                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3122
3123         hci_dev_set_flag(hdev, HCI_SETUP);
3124         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3125
3126         if (hdev->dev_type == HCI_PRIMARY) {
3127                 /* Assume BR/EDR support until proven otherwise (such as
3128                  * through reading supported features during init).
3129                  */
3130                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3131         }
3132
3133         write_lock(&hci_dev_list_lock);
3134         list_add(&hdev->list, &hci_dev_list);
3135         write_unlock(&hci_dev_list_lock);
3136
3137         /* Devices that are marked for raw-only usage are unconfigured
3138          * and should not be included in normal operation.
3139          */
3140         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3141                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3142
3143         hci_sock_dev_event(hdev, HCI_DEV_REG);
3144         hci_dev_hold(hdev);
3145
3146         queue_work(hdev->req_workqueue, &hdev->power_on);
3147
3148         return id;
3149
3150 err_wqueue:
3151         debugfs_remove_recursive(hdev->debugfs);
3152         destroy_workqueue(hdev->workqueue);
3153         destroy_workqueue(hdev->req_workqueue);
3154 err:
3155         ida_simple_remove(&hci_index_ida, hdev->id);
3156
3157         return error;
3158 }
3159 EXPORT_SYMBOL(hci_register_dev);
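
/* Illustrative sketch, not part of this file: a minimal transport driver
 * pairs hci_alloc_dev() with hci_register_dev() roughly as follows. The
 * my_open/my_close/my_send callbacks are hypothetical placeholders; a
 * real driver supplies its own implementations.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */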
3160
3161 /* Unregister HCI device */
3162 void hci_unregister_dev(struct hci_dev *hdev)
3163 {
3164         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3165
3166         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3167
3168         write_lock(&hci_dev_list_lock);
3169         list_del(&hdev->list);
3170         write_unlock(&hci_dev_list_lock);
3171
3172         cancel_work_sync(&hdev->power_on);
3173
3174         hci_dev_do_close(hdev);
3175
3176         if (!test_bit(HCI_INIT, &hdev->flags) &&
3177             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3178             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3179                 hci_dev_lock(hdev);
3180                 mgmt_index_removed(hdev);
3181                 hci_dev_unlock(hdev);
3182         }
3183
3184         /* mgmt_index_removed should take care of emptying the
3185          * pending list */
3186         BUG_ON(!list_empty(&hdev->mgmt_pending));
3187
3188         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3189
3190         if (hdev->rfkill) {
3191                 rfkill_unregister(hdev->rfkill);
3192                 rfkill_destroy(hdev->rfkill);
3193         }
3194
3195         device_del(&hdev->dev);
3196         /* Actual cleanup is deferred until hci_cleanup_dev(). */
3197         hci_dev_put(hdev);
3198 }
3199 EXPORT_SYMBOL(hci_unregister_dev);
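
/* Illustrative teardown, mirroring the sketch after hci_register_dev():
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 *
 * hci_free_dev() only drops the reference taken by hci_alloc_dev();
 * hci_cleanup_dev() below runs once the last reference to &hdev->dev
 * is gone.
 */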
3200
3201 /* Cleanup HCI device */
3202 void hci_cleanup_dev(struct hci_dev *hdev)
3203 {
3204         debugfs_remove_recursive(hdev->debugfs);
3205         kfree_const(hdev->hw_info);
3206         kfree_const(hdev->fw_info);
3207
3208         destroy_workqueue(hdev->workqueue);
3209         destroy_workqueue(hdev->req_workqueue);
3210
3211         hci_dev_lock(hdev);
3212         hci_bdaddr_list_clear(&hdev->blacklist);
3213         hci_bdaddr_list_clear(&hdev->whitelist);
3214         hci_uuids_clear(hdev);
3215         hci_link_keys_clear(hdev);
3216         hci_smp_ltks_clear(hdev);
3217         hci_smp_irks_clear(hdev);
3218         hci_remote_oob_data_clear(hdev);
3219         hci_adv_instances_clear(hdev);
3220         hci_bdaddr_list_clear(&hdev->le_white_list);
3221         hci_conn_params_clear_all(hdev);
3222         hci_discovery_filter_clear(hdev);
3223         hci_dev_unlock(hdev);
3224
3225         ida_simple_remove(&hci_index_ida, hdev->id);
3226 }
3227
3228 /* Suspend HCI device */
3229 int hci_suspend_dev(struct hci_dev *hdev)
3230 {
3231         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3232         return 0;
3233 }
3234 EXPORT_SYMBOL(hci_suspend_dev);
3235
3236 /* Resume HCI device */
3237 int hci_resume_dev(struct hci_dev *hdev)
3238 {
3239         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3240         return 0;
3241 }
3242 EXPORT_SYMBOL(hci_resume_dev);
3243
3244 /* Reset HCI device */
3245 int hci_reset_dev(struct hci_dev *hdev)
3246 {
3247         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3248         struct sk_buff *skb;
3249
3250         skb = bt_skb_alloc(3, GFP_ATOMIC);
3251         if (!skb)
3252                 return -ENOMEM;
3253
3254         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3255         memcpy(skb_put(skb, 3), hw_err, 3);
3256
3257         /* Send Hardware Error to upper stack */
3258         return hci_recv_frame(hdev, skb);
3259 }
3260 EXPORT_SYMBOL(hci_reset_dev);
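
/* For reference: the injected frame above is a complete HCI event packet,
 * namely event code HCI_EV_HARDWARE_ERROR (0x10), parameter length 0x01
 * and a single error-code byte of 0x00.
 */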
3261
3262 /* Receive frame from HCI drivers */
3263 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3264 {
3265         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3266                       !test_bit(HCI_INIT, &hdev->flags))) {
3267                 kfree_skb(skb);
3268                 return -ENXIO;
3269         }
3270
3271         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3272             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3273             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3274                 kfree_skb(skb);
3275                 return -EINVAL;
3276         }
3277
3278         /* Incoming skb */
3279         bt_cb(skb)->incoming = 1;
3280
3281         /* Time stamp */
3282         __net_timestamp(skb);
3283
3284         skb_queue_tail(&hdev->rx_q, skb);
3285         queue_work(hdev->workqueue, &hdev->rx_work);
3286
3287         return 0;
3288 }
3289 EXPORT_SYMBOL(hci_recv_frame);
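
/* Illustrative sketch of the driver side: received data is tagged with
 * its packet type and handed to hci_recv_frame(). The data and count
 * variables here are hypothetical.
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	memcpy(skb_put(skb, count), data, count);
 *	return hci_recv_frame(hdev, skb);
 */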
3290
3291 /* Receive diagnostic message from HCI drivers */
3292 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3293 {
3294         /* Mark as diagnostic packet */
3295         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3296
3297         /* Time stamp */
3298         __net_timestamp(skb);
3299
3300         skb_queue_tail(&hdev->rx_q, skb);
3301         queue_work(hdev->workqueue, &hdev->rx_work);
3302
3303         return 0;
3304 }
3305 EXPORT_SYMBOL(hci_recv_diag);
3306
3307 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3308 {
3309         va_list vargs;
3310
3311         va_start(vargs, fmt);
3312         kfree_const(hdev->hw_info);
3313         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3314         va_end(vargs);
3315 }
3316 EXPORT_SYMBOL(hci_set_hw_info);
3317
3318 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3319 {
3320         va_list vargs;
3321
3322         va_start(vargs, fmt);
3323         kfree_const(hdev->fw_info);
3324         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3325         va_end(vargs);
3326 }
3327 EXPORT_SYMBOL(hci_set_fw_info);
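
/* Illustrative usage with hypothetical values:
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u", fw_build);
 *
 * kvasprintf_const() pairs with the kfree_const() calls above; a format
 * string without conversions may be reused directly instead of copied.
 */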
3328
3329 /* ---- Interface to upper protocols ---- */
3330
3331 int hci_register_cb(struct hci_cb *cb)
3332 {
3333         BT_DBG("%p name %s", cb, cb->name);
3334
3335         mutex_lock(&hci_cb_list_lock);
3336         list_add_tail(&cb->list, &hci_cb_list);
3337         mutex_unlock(&hci_cb_list_lock);
3338
3339         return 0;
3340 }
3341 EXPORT_SYMBOL(hci_register_cb);
3342
3343 int hci_unregister_cb(struct hci_cb *cb)
3344 {
3345         BT_DBG("%p name %s", cb, cb->name);
3346
3347         mutex_lock(&hci_cb_list_lock);
3348         list_del(&cb->list);
3349         mutex_unlock(&hci_cb_list_lock);
3350
3351         return 0;
3352 }
3353 EXPORT_SYMBOL(hci_unregister_cb);
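
/* Illustrative sketch: an upper protocol hooks into connection events
 * with a static struct hci_cb. The names here are hypothetical.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_proto_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */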
3354
3355 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3356 {
3357         int err;
3358
3359         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3360                skb->len);
3361
3362         /* Time stamp */
3363         __net_timestamp(skb);
3364
3365         /* Send copy to monitor */
3366         hci_send_to_monitor(hdev, skb);
3367
3368         if (atomic_read(&hdev->promisc)) {
3369                 /* Send copy to the sockets */
3370                 hci_send_to_sock(hdev, skb);
3371         }
3372
3373         /* Get rid of skb owner, prior to sending to the driver. */
3374         skb_orphan(skb);
3375
3376         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3377                 kfree_skb(skb);
3378                 return;
3379         }
3380
3381         err = hdev->send(hdev, skb);
3382         if (err < 0) {
3383                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3384                 kfree_skb(skb);
3385         }
3386 }
3387
3388 /* Send HCI command */
3389 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3390                  const void *param)
3391 {
3392         struct sk_buff *skb;
3393
3394         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3395
3396         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3397         if (!skb) {
3398                 BT_ERR("%s no memory for command", hdev->name);
3399                 return -ENOMEM;
3400         }
3401
3402         /* Stand-alone HCI commands must be flagged as
3403          * single-command requests.
3404          */
3405         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3406
3407         skb_queue_tail(&hdev->cmd_q, skb);
3408         queue_work(hdev->workqueue, &hdev->cmd_work);
3409
3410         return 0;
3411 }
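
/* Illustrative usage: a parameterless command such as HCI Reset is
 * queued with
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * and completes asynchronously via the event path.
 */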
3412
3413 /* Get data from the previously sent command */
3414 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3415 {
3416         struct hci_command_hdr *hdr;
3417
3418         if (!hdev->sent_cmd)
3419                 return NULL;
3420
3421         hdr = (void *) hdev->sent_cmd->data;
3422
3423         if (hdr->opcode != cpu_to_le16(opcode))
3424                 return NULL;
3425
3426         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3427
3428         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3429 }
3430
3431 /* Send HCI command and wait for command complete event */
3432 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3433                              const void *param, u32 timeout)
3434 {
3435         struct sk_buff *skb;
3436
3437         if (!test_bit(HCI_UP, &hdev->flags))
3438                 return ERR_PTR(-ENETDOWN);
3439
3440         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3441
3442         hci_req_sync_lock(hdev);
3443         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3444         hci_req_sync_unlock(hdev);
3445
3446         return skb;
3447 }
3448 EXPORT_SYMBOL(hci_cmd_sync);
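
/* Illustrative usage: the synchronous variant hands back the Command
 * Complete parameters as an skb, e.g. for reading the local version:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_local_version *)skb->data;
 *	...
 *	kfree_skb(skb);
 */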
3449
3450 /* Send ACL data */
3451 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3452 {
3453         struct hci_acl_hdr *hdr;
3454         int len = skb->len;
3455
3456         skb_push(skb, HCI_ACL_HDR_SIZE);
3457         skb_reset_transport_header(skb);
3458         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3459         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3460         hdr->dlen   = cpu_to_le16(len);
3461 }
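
/* For reference: hci_handle_pack() folds the 4 bits of packet boundary
 * and broadcast flags into the top bits of the 12-bit connection handle,
 * so e.g. handle 0x002a with ACL_START (0x02) is sent as 0x202a.
 */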
3462
3463 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3464                           struct sk_buff *skb, __u16 flags)
3465 {
3466         struct hci_conn *conn = chan->conn;
3467         struct hci_dev *hdev = conn->hdev;
3468         struct sk_buff *list;
3469
3470         skb->len = skb_headlen(skb);
3471         skb->data_len = 0;
3472
3473         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3474
3475         switch (hdev->dev_type) {
3476         case HCI_PRIMARY:
3477                 hci_add_acl_hdr(skb, conn->handle, flags);
3478                 break;
3479         case HCI_AMP:
3480                 hci_add_acl_hdr(skb, chan->handle, flags);
3481                 break;
3482         default:
3483                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3484                 return;
3485         }
3486
3487         list = skb_shinfo(skb)->frag_list;
3488         if (!list) {
3489                 /* Non-fragmented */
3490                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3491
3492                 skb_queue_tail(queue, skb);
3493         } else {
3494                 /* Fragmented */
3495                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3496
3497                 skb_shinfo(skb)->frag_list = NULL;
3498
3499                 /* Queue all fragments atomically. We need to use spin_lock_bh
3500                  * here because of 6LoWPAN links, as there this function is
3501                  * called from softirq and using normal spin lock could cause
3502                  * deadlocks.
3503                  */
3504                 spin_lock_bh(&queue->lock);
3505
3506                 __skb_queue_tail(queue, skb);
3507
3508                 flags &= ~ACL_START;
3509                 flags |= ACL_CONT;
3510                 do {
3511                         skb = list; list = list->next;
3512
3513                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3514                         hci_add_acl_hdr(skb, conn->handle, flags);
3515
3516                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3517
3518                         __skb_queue_tail(queue, skb);
3519                 } while (list);
3520
3521                 spin_unlock_bh(&queue->lock);
3522         }
3523 }
3524
3525 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3526 {
3527         struct hci_dev *hdev = chan->conn->hdev;
3528
3529         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3530
3531         hci_queue_acl(chan, &chan->data_q, skb, flags);
3532
3533         queue_work(hdev->workqueue, &hdev->tx_work);
3534 }
3535
3536 /* Send SCO data */
3537 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3538 {
3539         struct hci_dev *hdev = conn->hdev;
3540         struct hci_sco_hdr hdr;
3541
3542         BT_DBG("%s len %d", hdev->name, skb->len);
3543
3544         hdr.handle = cpu_to_le16(conn->handle);
3545         hdr.dlen   = skb->len;
3546
3547         skb_push(skb, HCI_SCO_HDR_SIZE);
3548         skb_reset_transport_header(skb);
3549         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3550
3551         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3552
3553         skb_queue_tail(&conn->data_q, skb);
3554         queue_work(hdev->workqueue, &hdev->tx_work);
3555 }
3556
3557 /* ---- HCI TX task (outgoing data) ---- */
3558
3559 /* HCI Connection scheduler */
3560 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3561                                      int *quote)
3562 {
3563         struct hci_conn_hash *h = &hdev->conn_hash;
3564         struct hci_conn *conn = NULL, *c;
3565         unsigned int num = 0, min = ~0;
3566
3567         /* We don't have to lock the device here. Connections are always
3568          * added and removed with the TX task disabled. */
3569
3570         rcu_read_lock();
3571
3572         list_for_each_entry_rcu(c, &h->list, list) {
3573                 if (c->type != type || skb_queue_empty(&c->data_q))
3574                         continue;
3575
3576                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3577                         continue;
3578
3579                 num++;
3580
3581                 if (c->sent < min) {
3582                         min  = c->sent;
3583                         conn = c;
3584                 }
3585
3586                 if (hci_conn_num(hdev, type) == num)
3587                         break;
3588         }
3589
3590         rcu_read_unlock();
3591
3592         if (conn) {
3593                 int cnt, q;
3594
3595                 switch (conn->type) {
3596                 case ACL_LINK:
3597                         cnt = hdev->acl_cnt;
3598                         break;
3599                 case SCO_LINK:
3600                 case ESCO_LINK:
3601                         cnt = hdev->sco_cnt;
3602                         break;
3603                 case LE_LINK:
3604                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3605                         break;
3606                 default:
3607                         cnt = 0;
3608                         BT_ERR("Unknown link type");
3609                 }
3610
3611                 q = cnt / num;
3612                 *quote = q ? q : 1;
3613         } else
3614                 *quote = 0;
3615
3616         BT_DBG("conn %p quote %d", conn, *quote);
3617         return conn;
3618 }
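
/* For reference: the quote is a fair share of the controller's free
 * buffers. With e.g. acl_cnt = 8 and three busy ACL connections, the
 * least-loaded connection may send 8 / 3 = 2 packets this round; the
 * quote never drops below 1, so a busy link is not starved outright.
 */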
3619
3620 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3621 {
3622         struct hci_conn_hash *h = &hdev->conn_hash;
3623         struct hci_conn *c;
3624
3625         BT_ERR("%s link tx timeout", hdev->name);
3626
3627         rcu_read_lock();
3628
3629         /* Kill stalled connections */
3630         list_for_each_entry_rcu(c, &h->list, list) {
3631                 if (c->type == type && c->sent) {
3632                         BT_ERR("%s killing stalled connection %pMR",
3633                                hdev->name, &c->dst);
3634                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3635                 }
3636         }
3637
3638         rcu_read_unlock();
3639 }
3640
3641 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3642                                       int *quote)
3643 {
3644         struct hci_conn_hash *h = &hdev->conn_hash;
3645         struct hci_chan *chan = NULL;
3646         unsigned int num = 0, min = ~0, cur_prio = 0;
3647         struct hci_conn *conn;
3648         int cnt, q, conn_num = 0;
3649
3650         BT_DBG("%s", hdev->name);
3651
3652         rcu_read_lock();
3653
3654         list_for_each_entry_rcu(conn, &h->list, list) {
3655                 struct hci_chan *tmp;
3656
3657                 if (conn->type != type)
3658                         continue;
3659
3660                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3661                         continue;
3662
3663                 conn_num++;
3664
3665                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3666                         struct sk_buff *skb;
3667
3668                         if (skb_queue_empty(&tmp->data_q))
3669                                 continue;
3670
3671                         skb = skb_peek(&tmp->data_q);
3672                         if (skb->priority < cur_prio)
3673                                 continue;
3674
3675                         if (skb->priority > cur_prio) {
3676                                 num = 0;
3677                                 min = ~0;
3678                                 cur_prio = skb->priority;
3679                         }
3680
3681                         num++;
3682
3683                         if (conn->sent < min) {
3684                                 min  = conn->sent;
3685                                 chan = tmp;
3686                         }
3687                 }
3688
3689                 if (hci_conn_num(hdev, type) == conn_num)
3690                         break;
3691         }
3692
3693         rcu_read_unlock();
3694
3695         if (!chan)
3696                 return NULL;
3697
3698         switch (chan->conn->type) {
3699         case ACL_LINK:
3700                 cnt = hdev->acl_cnt;
3701                 break;
3702         case AMP_LINK:
3703                 cnt = hdev->block_cnt;
3704                 break;
3705         case SCO_LINK:
3706         case ESCO_LINK:
3707                 cnt = hdev->sco_cnt;
3708                 break;
3709         case LE_LINK:
3710                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3711                 break;
3712         default:
3713                 cnt = 0;
3714                 BT_ERR("Unknown link type");
3715         }
3716
3717         q = cnt / num;
3718         *quote = q ? q : 1;
3719         BT_DBG("chan %p quote %d", chan, *quote);
3720         return chan;
3721 }
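
/* For reference: channel selection is strictly priority-first. A channel
 * whose head skb carries a higher priority resets the candidate set, so
 * lower-priority traffic only receives quota once no higher-priority
 * channel has data queued.
 */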
3722
3723 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3724 {
3725         struct hci_conn_hash *h = &hdev->conn_hash;
3726         struct hci_conn *conn;
3727         int num = 0;
3728
3729         BT_DBG("%s", hdev->name);
3730
3731         rcu_read_lock();
3732
3733         list_for_each_entry_rcu(conn, &h->list, list) {
3734                 struct hci_chan *chan;
3735
3736                 if (conn->type != type)
3737                         continue;
3738
3739                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3740                         continue;
3741
3742                 num++;
3743
3744                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3745                         struct sk_buff *skb;
3746
3747                         if (chan->sent) {
3748                                 chan->sent = 0;
3749                                 continue;
3750                         }
3751
3752                         if (skb_queue_empty(&chan->data_q))
3753                                 continue;
3754
3755                         skb = skb_peek(&chan->data_q);
3756                         if (skb->priority >= HCI_PRIO_MAX - 1)
3757                                 continue;
3758
3759                         skb->priority = HCI_PRIO_MAX - 1;
3760
3761                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3762                                skb->priority);
3763                 }
3764
3765                 if (hci_conn_num(hdev, type) == num)
3766                         break;
3767         }
3768
3769         rcu_read_unlock();
3770
3771 }
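
/* For reference: channels that sent nothing in the last round but still
 * have data queued get their head skb promoted towards HCI_PRIO_MAX - 1,
 * so the starvation introduced by strict priority scheduling is bounded.
 */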
3772
3773 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3774 {
3775         /* Calculate count of blocks used by this packet */
3776         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3777 }
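
/* For reference: block-based flow control accounts in buffer blocks
 * rather than packets. With e.g. block_len = 64, a 676-byte ACL frame
 * (672 bytes of payload after the 4-byte ACL header) costs
 * DIV_ROUND_UP(672, 64) = 11 blocks.
 */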
3778
3779 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3780 {
3781         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3782                 /* ACL tx timeout must be longer than the maximum
3783                  * link supervision timeout (40.9 seconds). */
3784                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3785                                        HCI_ACL_TX_TIMEOUT))
3786                         hci_link_tx_to(hdev, ACL_LINK);
3787         }
3788 }
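
/* For reference: HCI_ACL_TX_TIMEOUT is 45 seconds, safely above the 40.9
 * second ceiling noted above, and the check only fires once the
 * controller has stopped returning buffer credits (cnt == 0).
 */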
3789
3790 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3791 {
3792         unsigned int cnt = hdev->acl_cnt;
3793         struct hci_chan *chan;
3794         struct sk_buff *skb;
3795         int quote;
3796
3797         __check_timeout(hdev, cnt);
3798
3799         while (hdev->acl_cnt &&
3800                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3801                 u32 priority = (skb_peek(&chan->data_q))->priority;
3802                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3803                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3804                                skb->len, skb->priority);
3805
3806                         /* Stop if priority has changed */
3807                         if (skb->priority < priority)
3808                                 break;
3809
3810                         skb = skb_dequeue(&chan->data_q);
3811
3812                         hci_conn_enter_active_mode(chan->conn,
3813                                                    bt_cb(skb)->force_active);
3814
3815                         hci_send_frame(hdev, skb);
3816                         hdev->acl_last_tx = jiffies;
3817
3818                         hdev->acl_cnt--;
3819                         chan->sent++;
3820                         chan->conn->sent++;
3821                 }
3822         }
3823
3824         if (cnt != hdev->acl_cnt)
3825                 hci_prio_recalculate(hdev, ACL_LINK);
3826 }
3827
3828 static void hci_sched_acl_blk(struct hci_dev *hdev)
3829 {
3830         unsigned int cnt = hdev->block_cnt;
3831         struct hci_chan *chan;
3832         struct sk_buff *skb;
3833         int quote;
3834         u8 type;
3835
3836         __check_timeout(hdev, cnt);
3837
3838         BT_DBG("%s", hdev->name);
3839
3840         if (hdev->dev_type == HCI_AMP)
3841                 type = AMP_LINK;
3842         else
3843                 type = ACL_LINK;
3844
3845         while (hdev->block_cnt > 0 &&
3846                (chan = hci_chan_sent(hdev, type, &quote))) {
3847                 u32 priority = (skb_peek(&chan->data_q))->priority;
3848                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3849                         int blocks;
3850
3851                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3852                                skb->len, skb->priority);
3853
3854                         /* Stop if priority has changed */
3855                         if (skb->priority < priority)
3856                                 break;
3857
3858                         skb = skb_dequeue(&chan->data_q);
3859
3860                         blocks = __get_blocks(hdev, skb);
3861                         if (blocks > hdev->block_cnt)
3862                                 return;
3863
3864                         hci_conn_enter_active_mode(chan->conn,
3865                                                    bt_cb(skb)->force_active);
3866
3867                         hci_send_frame(hdev, skb);
3868                         hdev->acl_last_tx = jiffies;
3869
3870                         hdev->block_cnt -= blocks;
3871                         quote -= blocks;
3872
3873                         chan->sent += blocks;
3874                         chan->conn->sent += blocks;
3875                 }
3876         }
3877
3878         if (cnt != hdev->block_cnt)
3879                 hci_prio_recalculate(hdev, type);
3880 }
3881
3882 static void hci_sched_acl(struct hci_dev *hdev)
3883 {
3884         BT_DBG("%s", hdev->name);
3885
3886         /* No ACL link over BR/EDR controller */
3887         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3888                 return;
3889
3890         /* No AMP link over AMP controller */
3891         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3892                 return;
3893
3894         switch (hdev->flow_ctl_mode) {
3895         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3896                 hci_sched_acl_pkt(hdev);
3897                 break;
3898
3899         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3900                 hci_sched_acl_blk(hdev);
3901                 break;
3902         }
3903 }
3904
3905 /* Schedule SCO */
3906 static void hci_sched_sco(struct hci_dev *hdev)
3907 {
3908         struct hci_conn *conn;
3909         struct sk_buff *skb;
3910         int quote;
3911
3912         BT_DBG("%s", hdev->name);
3913
3914         if (!hci_conn_num(hdev, SCO_LINK))
3915                 return;
3916
3917         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3918                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3919                         BT_DBG("skb %p len %d", skb, skb->len);
3920                         hci_send_frame(hdev, skb);
3921
3922                         conn->sent++;
3923                         if (conn->sent == ~0)
3924                                 conn->sent = 0;
3925                 }
3926         }
3927 }
3928
3929 static void hci_sched_esco(struct hci_dev *hdev)
3930 {
3931         struct hci_conn *conn;
3932         struct sk_buff *skb;
3933         int quote;
3934
3935         BT_DBG("%s", hdev->name);
3936
3937         if (!hci_conn_num(hdev, ESCO_LINK))
3938                 return;
3939
3940         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3941                                                      &quote))) {
3942                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3943                         BT_DBG("skb %p len %d", skb, skb->len);
3944                         hci_send_frame(hdev, skb);
3945
3946                         conn->sent++;
3947                         if (conn->sent == ~0)
3948                                 conn->sent = 0;
3949                 }
3950         }
3951 }
3952
3953 static void hci_sched_le(struct hci_dev *hdev)
3954 {
3955         struct hci_chan *chan;
3956         struct sk_buff *skb;
3957         int quote, cnt, tmp;
3958
3959         BT_DBG("%s", hdev->name);
3960
3961         if (!hci_conn_num(hdev, LE_LINK))
3962                 return;
3963
3964         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3965                 /* LE tx timeout must be longer than the maximum
3966                  * link supervision timeout (40.9 seconds). */
3967                 if (!hdev->le_cnt && hdev->le_pkts &&
3968                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3969                         hci_link_tx_to(hdev, LE_LINK);
3970         }
3971
3972         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3973         tmp = cnt;
3974         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3975                 u32 priority = (skb_peek(&chan->data_q))->priority;
3976                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3977                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3978                                skb->len, skb->priority);
3979
3980                         /* Stop if priority has changed */
3981                         if (skb->priority < priority)
3982                                 break;
3983
3984                         skb = skb_dequeue(&chan->data_q);
3985
3986                         hci_send_frame(hdev, skb);
3987                         hdev->le_last_tx = jiffies;
3988
3989                         cnt--;
3990                         chan->sent++;
3991                         chan->conn->sent++;
3992                 }
3993         }
3994
3995         if (hdev->le_pkts)
3996                 hdev->le_cnt = cnt;
3997         else
3998                 hdev->acl_cnt = cnt;
3999
4000         if (cnt != tmp)
4001                 hci_prio_recalculate(hdev, LE_LINK);
4002 }
4003
4004 static void hci_tx_work(struct work_struct *work)
4005 {
4006         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4007         struct sk_buff *skb;
4008
4009         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4010                hdev->sco_cnt, hdev->le_cnt);
4011
4012         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4013                 /* Schedule queues and send pending frames to the HCI driver */
4014                 hci_sched_acl(hdev);
4015                 hci_sched_sco(hdev);
4016                 hci_sched_esco(hdev);
4017                 hci_sched_le(hdev);
4018         }
4019
4020         /* Send any queued raw (unknown type) packets */
4021         while ((skb = skb_dequeue(&hdev->raw_q)))
4022                 hci_send_frame(hdev, skb);
4023 }
4024
4025 /* ----- HCI RX task (incoming data processing) ----- */
4026
4027 /* ACL data packet */
4028 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4029 {
4030         struct hci_acl_hdr *hdr = (void *) skb->data;
4031         struct hci_conn *conn;
4032         __u16 handle, flags;
4033
4034         skb_pull(skb, HCI_ACL_HDR_SIZE);
4035
4036         handle = __le16_to_cpu(hdr->handle);
4037         flags  = hci_flags(handle);
4038         handle = hci_handle(handle);
4039
4040         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4041                handle, flags);
4042
4043         hdev->stat.acl_rx++;
4044
4045         hci_dev_lock(hdev);
4046         conn = hci_conn_hash_lookup_handle(hdev, handle);
4047         hci_dev_unlock(hdev);
4048
4049         if (conn) {
4050                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4051
4052                 /* Send to upper protocol */
4053                 l2cap_recv_acldata(conn, skb, flags);
4054                 return;
4055         }
4056
4057         BT_ERR("%s ACL packet for unknown connection handle %d",
4058                hdev->name, handle);
4059
4060         kfree_skb(skb);
4061 }
4062
4063 /* SCO data packet */
4064 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4065 {
4066         struct hci_sco_hdr *hdr = (void *) skb->data;
4067         struct hci_conn *conn;
4068         __u16 handle;
4069
4070         skb_pull(skb, HCI_SCO_HDR_SIZE);
4071
4072         handle = __le16_to_cpu(hdr->handle);
4073
4074         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4075
4076         hdev->stat.sco_rx++;
4077
4078         hci_dev_lock(hdev);
4079         conn = hci_conn_hash_lookup_handle(hdev, handle);
4080         hci_dev_unlock(hdev);
4081
4082         if (conn) {
4083                 /* Send to upper protocol */
4084                 sco_recv_scodata(conn, skb);
4085                 return;
4086         }
4087
4088         BT_ERR("%s SCO packet for unknown connection handle %d",
4089                hdev->name, handle);
4090
4091         kfree_skb(skb);
4092 }
4093
4094 static bool hci_req_is_complete(struct hci_dev *hdev)
4095 {
4096         struct sk_buff *skb;
4097
4098         skb = skb_peek(&hdev->cmd_q);
4099         if (!skb)
4100                 return true;
4101
4102         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4103 }
4104
4105 static void hci_resend_last(struct hci_dev *hdev)
4106 {
4107         struct hci_command_hdr *sent;
4108         struct sk_buff *skb;
4109         u16 opcode;
4110
4111         if (!hdev->sent_cmd)
4112                 return;
4113
4114         sent = (void *) hdev->sent_cmd->data;
4115         opcode = __le16_to_cpu(sent->opcode);
4116         if (opcode == HCI_OP_RESET)
4117                 return;
4118
4119         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4120         if (!skb)
4121                 return;
4122
4123         skb_queue_head(&hdev->cmd_q, skb);
4124         queue_work(hdev->workqueue, &hdev->cmd_work);
4125 }
4126
4127 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4128                           hci_req_complete_t *req_complete,
4129                           hci_req_complete_skb_t *req_complete_skb)
4130 {
4131         struct sk_buff *skb;
4132         unsigned long flags;
4133
4134         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4135
4136         /* If the completed command doesn't match the last one that was
4137          * sent, we need to do special handling of it.
4138          */
4139         if (!hci_sent_cmd_data(hdev, opcode)) {
4140                 /* Some CSR-based controllers generate a spontaneous
4141                  * reset complete event during init, and any pending
4142                  * command will never be completed. In such a case, we
4143                  * need to resend whatever was the last sent
4144                  * command.
4145                  */
4146                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4147                         hci_resend_last(hdev);
4148
4149                 return;
4150         }
4151
4152         /* If the command succeeded and there's still more commands in
4153          * this request the request is not yet complete.
4154          */
4155         if (!status && !hci_req_is_complete(hdev))
4156                 return;
4157
4158         /* If this was the last command in a request, the complete
4159          * callback would be found in hdev->sent_cmd instead of the
4160          * command queue (hdev->cmd_q).
4161          */
4162         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4163                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4164                 return;
4165         }
4166
4167         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4168                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4169                 return;
4170         }
4171
4172         /* Remove all pending commands belonging to this request */
4173         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4174         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4175                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4176                         __skb_queue_head(&hdev->cmd_q, skb);
4177                         break;
4178                 }
4179
4180                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4181                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4182                 else
4183                         *req_complete = bt_cb(skb)->hci.req_complete;
4184                 kfree_skb(skb);
4185         }
4186         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4187 }
4188
4189 static void hci_rx_work(struct work_struct *work)
4190 {
4191         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4192         struct sk_buff *skb;
4193
4194         BT_DBG("%s", hdev->name);
4195
4196         while ((skb = skb_dequeue(&hdev->rx_q))) {
4197                 /* Send copy to monitor */
4198                 hci_send_to_monitor(hdev, skb);
4199
4200                 if (atomic_read(&hdev->promisc)) {
4201                         /* Send copy to the sockets */
4202                         hci_send_to_sock(hdev, skb);
4203                 }
4204
4205                 /* If the device has been opened in HCI_USER_CHANNEL,
4206                  * userspace has exclusive access to the device.
4207                  * While the device is in HCI_INIT, we still need to
4208                  * process the data packets so that the driver can
4209                  * complete its setup().
4210                  */
4211                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4212                     !test_bit(HCI_INIT, &hdev->flags)) {
4213                         kfree_skb(skb);
4214                         continue;
4215                 }
4216
4217                 if (test_bit(HCI_INIT, &hdev->flags)) {
4218                         /* Don't process data packets in this state. */
4219                         switch (hci_skb_pkt_type(skb)) {
4220                         case HCI_ACLDATA_PKT:
4221                         case HCI_SCODATA_PKT:
4222                                 kfree_skb(skb);
4223                                 continue;
4224                         }
4225                 }
4226
4227                 /* Process frame */
4228                 switch (hci_skb_pkt_type(skb)) {
4229                 case HCI_EVENT_PKT:
4230                         BT_DBG("%s Event packet", hdev->name);
4231                         hci_event_packet(hdev, skb);
4232                         break;
4233
4234                 case HCI_ACLDATA_PKT:
4235                         BT_DBG("%s ACL data packet", hdev->name);
4236                         hci_acldata_packet(hdev, skb);
4237                         break;
4238
4239                 case HCI_SCODATA_PKT:
4240                         BT_DBG("%s SCO data packet", hdev->name);
4241                         hci_scodata_packet(hdev, skb);
4242                         break;
4243
4244                 default:
4245                         kfree_skb(skb);
4246                         break;
4247                 }
4248         }
4249 }
4250
4251 static void hci_cmd_work(struct work_struct *work)
4252 {
4253         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4254         struct sk_buff *skb;
4255
4256         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4257                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4258
4259         /* Send queued commands */
4260         if (atomic_read(&hdev->cmd_cnt)) {
4261                 skb = skb_dequeue(&hdev->cmd_q);
4262                 if (!skb)
4263                         return;
4264
4265                 kfree_skb(hdev->sent_cmd);
4266
4267                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4268                 if (hdev->sent_cmd) {
4269                         atomic_dec(&hdev->cmd_cnt);
4270                         hci_send_frame(hdev, skb);
4271                         if (test_bit(HCI_RESET, &hdev->flags))
4272                                 cancel_delayed_work(&hdev->cmd_timer);
4273                         else
4274                                 schedule_delayed_work(&hdev->cmd_timer,
4275                                                       HCI_CMD_TIMEOUT);
4276                 } else {
4277                         skb_queue_head(&hdev->cmd_q, skb);
4278                         queue_work(hdev->workqueue, &hdev->cmd_work);
4279                 }
4280         }
4281 }
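
/* For reference: cmd_cnt is the command flow-control credit. It is
 * consumed here and replenished when the controller reports Command
 * Complete or Command Status; cmd_timer fires hci_cmd_timeout() if the
 * controller stops responding within HCI_CMD_TIMEOUT.
 */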