GNU Linux-libre 4.14.290-gnu1
[releases.git] net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43 #include "leds.h"
44
45 static void hci_rx_work(struct work_struct *work);
46 static void hci_cmd_work(struct work_struct *work);
47 static void hci_tx_work(struct work_struct *work);
48
49 /* HCI device list */
50 LIST_HEAD(hci_dev_list);
51 DEFINE_RWLOCK(hci_dev_list_lock);
52
53 /* HCI callback list */
54 LIST_HEAD(hci_cb_list);
55 DEFINE_MUTEX(hci_cb_list_lock);
56
57 /* HCI ID Numbering */
58 static DEFINE_IDA(hci_index_ida);
59
60 /* ---- HCI debugfs entries ---- */
61
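/* Each boolean attribute below follows the same pattern: reads return
 * "Y\n" or "N\n" depending on the corresponding device flag, and writes
 * parse the user input with strtobool() before toggling the feature.
 */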
62 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63                              size_t count, loff_t *ppos)
64 {
65         struct hci_dev *hdev = file->private_data;
66         char buf[3];
67
68         buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
69         buf[1] = '\n';
70         buf[2] = '\0';
71         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72 }
73
74 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75                               size_t count, loff_t *ppos)
76 {
77         struct hci_dev *hdev = file->private_data;
78         struct sk_buff *skb;
79         char buf[32];
80         size_t buf_size = min(count, (sizeof(buf)-1));
81         bool enable;
82
83         if (!test_bit(HCI_UP, &hdev->flags))
84                 return -ENETDOWN;
85
86         if (copy_from_user(buf, user_buf, buf_size))
87                 return -EFAULT;
88
89         buf[buf_size] = '\0';
90         if (strtobool(buf, &enable))
91                 return -EINVAL;
92
93         if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
94                 return -EALREADY;
95
96         hci_req_sync_lock(hdev);
97         if (enable)
98                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99                                      HCI_CMD_TIMEOUT);
100         else
101                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102                                      HCI_CMD_TIMEOUT);
103         hci_req_sync_unlock(hdev);
104
105         if (IS_ERR(skb))
106                 return PTR_ERR(skb);
107
108         kfree_skb(skb);
109
110         hci_dev_change_flag(hdev, HCI_DUT_MODE);
111
112         return count;
113 }
114
115 static const struct file_operations dut_mode_fops = {
116         .open           = simple_open,
117         .read           = dut_mode_read,
118         .write          = dut_mode_write,
119         .llseek         = default_llseek,
120 };
121
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123                                 size_t count, loff_t *ppos)
124 {
125         struct hci_dev *hdev = file->private_data;
126         char buf[3];
127
128         buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
129         buf[1] = '\n';
130         buf[2] = '\0';
131         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132 }
133
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135                                  size_t count, loff_t *ppos)
136 {
137         struct hci_dev *hdev = file->private_data;
138         char buf[32];
139         size_t buf_size = min(count, (sizeof(buf)-1));
140         bool enable;
141         int err;
142
143         if (copy_from_user(buf, user_buf, buf_size))
144                 return -EFAULT;
145
146         buf[buf_size] = '\0';
147         if (strtobool(buf, &enable))
148                 return -EINVAL;
149
150         /* When the diagnostic flags are not persistent and the transport
151          * is not active or in user channel operation, then there is no need
152          * for the vendor callback. Instead just store the desired value and
153          * the setting will be programmed when the controller gets powered on.
154          */
155         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
156             (!test_bit(HCI_RUNNING, &hdev->flags) ||
157              hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
158                 goto done;
159
160         hci_req_sync_lock(hdev);
161         err = hdev->set_diag(hdev, enable);
162         hci_req_sync_unlock(hdev);
163
164         if (err < 0)
165                 return err;
166
167 done:
168         if (enable)
169                 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
170         else
171                 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
172
173         return count;
174 }
175
176 static const struct file_operations vendor_diag_fops = {
177         .open           = simple_open,
178         .read           = vendor_diag_read,
179         .write          = vendor_diag_write,
180         .llseek         = default_llseek,
181 };
182
183 static void hci_debugfs_create_basic(struct hci_dev *hdev)
184 {
185         debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
186                             &dut_mode_fops);
187
188         if (hdev->set_diag)
189                 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
190                                     &vendor_diag_fops);
191 }
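/* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
 * the controller is hci0): DUT mode can then be toggled from user
 * space with, e.g.:
 *
 *   echo 1 > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode      # prints "Y"
 */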
192
193 static int hci_reset_req(struct hci_request *req, unsigned long opt)
194 {
195         BT_DBG("%s %ld", req->hdev->name, opt);
196
197         /* Reset device */
198         set_bit(HCI_RESET, &req->hdev->flags);
199         hci_req_add(req, HCI_OP_RESET, 0, NULL);
200         return 0;
201 }
202
203 static void bredr_init(struct hci_request *req)
204 {
205         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
206
207         /* Read Local Supported Features */
208         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209
210         /* Read Local Version */
211         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212
213         /* Read BD Address */
214         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
215 }
216
217 static void amp_init1(struct hci_request *req)
218 {
219         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
220
221         /* Read Local Version */
222         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
223
224         /* Read Local Supported Commands */
225         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
226
227         /* Read Local AMP Info */
228         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
229
230         /* Read Data Blk size */
231         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
232
233         /* Read Flow Control Mode */
234         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
235
236         /* Read Location Data */
237         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
238 }
239
240 static int amp_init2(struct hci_request *req)
241 {
242         /* Read Local Supported Features. Not all AMP controllers
243          * support this so it's placed conditionally in the second
244          * stage init.
245          */
246         if (req->hdev->commands[14] & 0x20)
247                 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
248
249         return 0;
250 }
251
252 static int hci_init1_req(struct hci_request *req, unsigned long opt)
253 {
254         struct hci_dev *hdev = req->hdev;
255
256         BT_DBG("%s %ld", hdev->name, opt);
257
258         /* Reset */
259         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
260                 hci_reset_req(req, 0);
261
262         switch (hdev->dev_type) {
263         case HCI_PRIMARY:
264                 bredr_init(req);
265                 break;
266         case HCI_AMP:
267                 amp_init1(req);
268                 break;
269         default:
270                 BT_ERR("Unknown device type %d", hdev->dev_type);
271                 break;
272         }
273
274         return 0;
275 }
276
277 static void bredr_setup(struct hci_request *req)
278 {
279         __le16 param;
280         __u8 flt_type;
281
282         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
283         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
284
285         /* Read Class of Device */
286         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
287
288         /* Read Local Name */
289         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
290
291         /* Read Voice Setting */
292         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
293
294         /* Read Number of Supported IAC */
295         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
296
297         /* Read Current IAC LAP */
298         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
299
300         /* Clear Event Filters */
301         flt_type = HCI_FLT_CLEAR_ALL;
302         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
303
304         /* Connection accept timeout ~20 secs */
305         param = cpu_to_le16(0x7d00);
306         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
307 }
308
309 static void le_setup(struct hci_request *req)
310 {
311         struct hci_dev *hdev = req->hdev;
312
313         /* Read LE Buffer Size */
314         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
315
316         /* Read LE Local Supported Features */
317         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
318
319         /* Read LE Supported States */
320         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
321
322         /* LE-only controllers have LE implicitly enabled */
323         if (!lmp_bredr_capable(hdev))
324                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
325 }
326
327 static void hci_setup_event_mask(struct hci_request *req)
328 {
329         struct hci_dev *hdev = req->hdev;
330
331         /* The second byte is 0xff instead of 0x9f (two reserved bits
332          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
333          * command otherwise.
334          */
335         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
336
337         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
338          * any event mask for pre 1.2 devices.
339          */
340         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
341                 return;
342
343         if (lmp_bredr_capable(hdev)) {
344                 events[4] |= 0x01; /* Flow Specification Complete */
345         } else {
346                 /* Use a different default for LE-only devices */
347                 memset(events, 0, sizeof(events));
348                 events[1] |= 0x20; /* Command Complete */
349                 events[1] |= 0x40; /* Command Status */
350                 events[1] |= 0x80; /* Hardware Error */
351
352                 /* If the controller supports the Disconnect command, enable
353                  * the corresponding event. In addition enable packet flow
354                  * control related events.
355                  */
356                 if (hdev->commands[0] & 0x20) {
357                         events[0] |= 0x10; /* Disconnection Complete */
358                         events[2] |= 0x04; /* Number of Completed Packets */
359                         events[3] |= 0x02; /* Data Buffer Overflow */
360                 }
361
362                 /* If the controller supports the Read Remote Version
363                  * Information command, enable the corresponding event.
364                  */
365                 if (hdev->commands[2] & 0x80)
366                         events[1] |= 0x08; /* Read Remote Version Information
367                                             * Complete
368                                             */
369
370                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
371                         events[0] |= 0x80; /* Encryption Change */
372                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
373                 }
374         }
375
376         if (lmp_inq_rssi_capable(hdev) ||
377             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
378                 events[4] |= 0x02; /* Inquiry Result with RSSI */
379
380         if (lmp_ext_feat_capable(hdev))
381                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
382
383         if (lmp_esco_capable(hdev)) {
384                 events[5] |= 0x08; /* Synchronous Connection Complete */
385                 events[5] |= 0x10; /* Synchronous Connection Changed */
386         }
387
388         if (lmp_sniffsubr_capable(hdev))
389                 events[5] |= 0x20; /* Sniff Subrating */
390
391         if (lmp_pause_enc_capable(hdev))
392                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
393
394         if (lmp_ext_inq_capable(hdev))
395                 events[5] |= 0x40; /* Extended Inquiry Result */
396
397         if (lmp_no_flush_capable(hdev))
398                 events[7] |= 0x01; /* Enhanced Flush Complete */
399
400         if (lmp_lsto_capable(hdev))
401                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
402
403         if (lmp_ssp_capable(hdev)) {
404                 events[6] |= 0x01;      /* IO Capability Request */
405                 events[6] |= 0x02;      /* IO Capability Response */
406                 events[6] |= 0x04;      /* User Confirmation Request */
407                 events[6] |= 0x08;      /* User Passkey Request */
408                 events[6] |= 0x10;      /* Remote OOB Data Request */
409                 events[6] |= 0x20;      /* Simple Pairing Complete */
410                 events[7] |= 0x04;      /* User Passkey Notification */
411                 events[7] |= 0x08;      /* Keypress Notification */
412                 events[7] |= 0x10;      /* Remote Host Supported
413                                          * Features Notification
414                                          */
415         }
416
417         if (lmp_le_capable(hdev))
418                 events[7] |= 0x20;      /* LE Meta-Event */
419
420         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
421 }
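/* Each set bit in the 8-byte mask above enables one HCI event, with
 * byte/bit positions taken from the Set Event Mask command in the
 * Bluetooth Core specification; events[4] |= 0x02, for instance, sets
 * bit 33 of the mask, the Inquiry Result with RSSI event.
 */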
422
423 static int hci_init2_req(struct hci_request *req, unsigned long opt)
424 {
425         struct hci_dev *hdev = req->hdev;
426
427         if (hdev->dev_type == HCI_AMP)
428                 return amp_init2(req);
429
430         if (lmp_bredr_capable(hdev))
431                 bredr_setup(req);
432         else
433                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
434
435         if (lmp_le_capable(hdev))
436                 le_setup(req);
437
438         /* All Bluetooth 1.2 and later controllers should support the
439          * HCI command for reading the local supported commands.
440          *
441          * Unfortunately some controllers indicate Bluetooth 1.2 support,
442          * but do not have support for this command. If that is the case,
443          * the driver can quirk the behavior and skip reading the local
444          * supported commands.
445          */
446         if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
447             !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
448                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
449
450         if (lmp_ssp_capable(hdev)) {
451                 /* When SSP is available, the host features page
452                  * should be available as well. However, some
453                  * controllers list the max_page as 0 as long as SSP
454                  * has not been enabled. To achieve proper debugging
455                  * output, force max_page to at least 1.
456                  */
457                 hdev->max_page = 0x01;
458
459                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
460                         u8 mode = 0x01;
461
462                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
463                                     sizeof(mode), &mode);
464                 } else {
465                         struct hci_cp_write_eir cp;
466
467                         memset(hdev->eir, 0, sizeof(hdev->eir));
468                         memset(&cp, 0, sizeof(cp));
469
470                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
471                 }
472         }
473
474         if (lmp_inq_rssi_capable(hdev) ||
475             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
476                 u8 mode;
477
478                 /* If Extended Inquiry Result events are supported, then
479                  * they are clearly preferred over Inquiry Result with RSSI
480                  * events.
481                  */
482                 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
483
484                 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
485         }
486
487         if (lmp_inq_tx_pwr_capable(hdev))
488                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
489
490         if (lmp_ext_feat_capable(hdev)) {
491                 struct hci_cp_read_local_ext_features cp;
492
493                 cp.page = 0x01;
494                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
495                             sizeof(cp), &cp);
496         }
497
498         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
499                 u8 enable = 1;
500                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
501                             &enable);
502         }
503
504         return 0;
505 }
506
507 static void hci_setup_link_policy(struct hci_request *req)
508 {
509         struct hci_dev *hdev = req->hdev;
510         struct hci_cp_write_def_link_policy cp;
511         u16 link_policy = 0;
512
513         if (lmp_rswitch_capable(hdev))
514                 link_policy |= HCI_LP_RSWITCH;
515         if (lmp_hold_capable(hdev))
516                 link_policy |= HCI_LP_HOLD;
517         if (lmp_sniff_capable(hdev))
518                 link_policy |= HCI_LP_SNIFF;
519         if (lmp_park_capable(hdev))
520                 link_policy |= HCI_LP_PARK;
521
522         cp.policy = cpu_to_le16(link_policy);
523         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
524 }
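/* The default link policy above is the OR of the supported HCI_LP_*
 * bits; for example, a controller that supports role switch and sniff
 * mode (but not hold or park) ends up with policy 0x0005, i.e.
 * HCI_LP_RSWITCH | HCI_LP_SNIFF.
 */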
525
526 static void hci_set_le_support(struct hci_request *req)
527 {
528         struct hci_dev *hdev = req->hdev;
529         struct hci_cp_write_le_host_supported cp;
530
531         /* LE-only devices do not support explicit enablement */
532         if (!lmp_bredr_capable(hdev))
533                 return;
534
535         memset(&cp, 0, sizeof(cp));
536
537         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
538                 cp.le = 0x01;
539                 cp.simul = 0x00;
540         }
541
542         if (cp.le != lmp_host_le_capable(hdev))
543                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
544                             &cp);
545 }
546
547 static void hci_set_event_mask_page_2(struct hci_request *req)
548 {
549         struct hci_dev *hdev = req->hdev;
550         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
551         bool changed = false;
552
553         /* If Connectionless Slave Broadcast master role is supported
554          * enable all necessary events for it.
555          */
556         if (lmp_csb_master_capable(hdev)) {
557                 events[1] |= 0x40;      /* Triggered Clock Capture */
558                 events[1] |= 0x80;      /* Synchronization Train Complete */
559                 events[2] |= 0x10;      /* Slave Page Response Timeout */
560                 events[2] |= 0x20;      /* CSB Channel Map Change */
561                 changed = true;
562         }
563
564         /* If Connectionless Slave Broadcast slave role is supported
565          * enable all necessary events for it.
566          */
567         if (lmp_csb_slave_capable(hdev)) {
568                 events[2] |= 0x01;      /* Synchronization Train Received */
569                 events[2] |= 0x02;      /* CSB Receive */
570                 events[2] |= 0x04;      /* CSB Timeout */
571                 events[2] |= 0x08;      /* Truncated Page Complete */
572                 changed = true;
573         }
574
575         /* Enable Authenticated Payload Timeout Expired event if supported */
576         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
577                 events[2] |= 0x80;
578                 changed = true;
579         }
580
581         /* Some Broadcom based controllers indicate support for the Set Event
582          * Mask Page 2 command, but then actually do not support it. Since
583          * the default value is all bits set to zero, the command is only
584          * required if the event mask has to be changed. In case no change
585          * to the event mask is needed, skip this command.
586          */
587         if (changed)
588                 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
589                             sizeof(events), events);
590 }
591
592 static int hci_init3_req(struct hci_request *req, unsigned long opt)
593 {
594         struct hci_dev *hdev = req->hdev;
595         u8 p;
596
597         hci_setup_event_mask(req);
598
599         if (hdev->commands[6] & 0x20 &&
600             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
601                 struct hci_cp_read_stored_link_key cp;
602
603                 bacpy(&cp.bdaddr, BDADDR_ANY);
604                 cp.read_all = 0x01;
605                 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
606         }
607
608         if (hdev->commands[5] & 0x10)
609                 hci_setup_link_policy(req);
610
611         if (hdev->commands[8] & 0x01)
612                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
613
614         /* Some older Broadcom based Bluetooth 1.2 controllers do not
615          * support the Read Page Scan Type command. Check support for
616          * this command in the bit mask of supported commands.
617          */
618         if (hdev->commands[13] & 0x01)
619                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
620
621         if (lmp_le_capable(hdev)) {
622                 u8 events[8];
623
624                 memset(events, 0, sizeof(events));
625
626                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
627                         events[0] |= 0x10;      /* LE Long Term Key Request */
628
629                 /* If controller supports the Connection Parameters Request
630                  * Link Layer Procedure, enable the corresponding event.
631                  */
632                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
633                         events[0] |= 0x20;      /* LE Remote Connection
634                                                  * Parameter Request
635                                                  */
636
637                 /* If the controller supports the Data Length Extension
638                  * feature, enable the corresponding event.
639                  */
640                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
641                         events[0] |= 0x40;      /* LE Data Length Change */
642
643                 /* If the controller supports Extended Scanner Filter
644                  * Policies, enable the corresponding event.
645                  */
646                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
647                         events[1] |= 0x04;      /* LE Direct Advertising
648                                                  * Report
649                                                  */
650
651                 /* If the controller supports Channel Selection Algorithm #2
652                  * feature, enable the corresponding event.
653                  */
654                 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
655                         events[2] |= 0x08;      /* LE Channel Selection
656                                                  * Algorithm
657                                                  */
658
659                 /* If the controller supports the LE Set Scan Enable command,
660                  * enable the corresponding advertising report event.
661                  */
662                 if (hdev->commands[26] & 0x08)
663                         events[0] |= 0x02;      /* LE Advertising Report */
664
665                 /* If the controller supports the LE Create Connection
666                  * command, enable the corresponding event.
667                  */
668                 if (hdev->commands[26] & 0x10)
669                         events[0] |= 0x01;      /* LE Connection Complete */
670
671                 /* If the controller supports the LE Connection Update
672                  * command, enable the corresponding event.
673                  */
674                 if (hdev->commands[27] & 0x04)
675                         events[0] |= 0x04;      /* LE Connection Update
676                                                  * Complete
677                                                  */
678
679                 /* If the controller supports the LE Read Remote Used Features
680                  * command, enable the corresponding event.
681                  */
682                 if (hdev->commands[27] & 0x20)
683                         events[0] |= 0x08;      /* LE Read Remote Used
684                                                  * Features Complete
685                                                  */
686
687                 /* If the controller supports the LE Read Local P-256
688                  * Public Key command, enable the corresponding event.
689                  */
690                 if (hdev->commands[34] & 0x02)
691                         events[0] |= 0x80;      /* LE Read Local P-256
692                                                  * Public Key Complete
693                                                  */
694
695                 /* If the controller supports the LE Generate DHKey
696                  * command, enable the corresponding event.
697                  */
698                 if (hdev->commands[34] & 0x04)
699                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
700
701                 /* If the controller supports the LE Set Default PHY or
702                  * LE Set PHY commands, enable the corresponding event.
703                  */
704                 if (hdev->commands[35] & (0x20 | 0x40))
705                         events[1] |= 0x08;        /* LE PHY Update Complete */
706
707                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
708                             events);
709
710                 if (hdev->commands[25] & 0x40) {
711                         /* Read LE Advertising Channel TX Power */
712                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
713                 }
714
715                 if (hdev->commands[26] & 0x40) {
716                         /* Read LE White List Size */
717                         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
718                                     0, NULL);
719                 }
720
721                 if (hdev->commands[26] & 0x80) {
722                         /* Clear LE White List */
723                         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
724                 }
725
726                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
727                         /* Read LE Maximum Data Length */
728                         hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
729
730                         /* Read LE Suggested Default Data Length */
731                         hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
732                 }
733
734                 hci_set_le_support(req);
735         }
736
737         /* Read features beyond page 1 if available */
738         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
739                 struct hci_cp_read_local_ext_features cp;
740
741                 cp.page = p;
742                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
743                             sizeof(cp), &cp);
744         }
745
746         return 0;
747 }
748
749 static int hci_init4_req(struct hci_request *req, unsigned long opt)
750 {
751         struct hci_dev *hdev = req->hdev;
752
753         /* Some Broadcom based Bluetooth controllers do not support the
754          * Delete Stored Link Key command. They are clearly indicating its
755          * absence in the bit mask of supported commands.
756          *
757          * Check the supported commands and, only if the command is marked
758          * as supported, send it. If not supported, assume that the controller
759          * does not have actual support for stored link keys, which makes this
760          * command redundant anyway.
761          *
762          * Some controllers indicate that they support handling deleting
763          * stored link keys, but they don't. The quirk lets a driver
764          * just disable this command.
765          */
766         if (hdev->commands[6] & 0x80 &&
767             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
768                 struct hci_cp_delete_stored_link_key cp;
769
770                 bacpy(&cp.bdaddr, BDADDR_ANY);
771                 cp.delete_all = 0x01;
772                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
773                             sizeof(cp), &cp);
774         }
775
776         /* Set event mask page 2 if the HCI command for it is supported */
777         if (hdev->commands[22] & 0x04)
778                 hci_set_event_mask_page_2(req);
779
780         /* Read local codec list if the HCI command is supported */
781         if (hdev->commands[29] & 0x20)
782                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
783
784         /* Get MWS transport configuration if the HCI command is supported */
785         if (hdev->commands[30] & 0x08)
786                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
787
788         /* Check for Synchronization Train support */
789         if (lmp_sync_train_capable(hdev))
790                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
791
792         /* Enable Secure Connections if supported and configured */
793         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
794             bredr_sc_enabled(hdev)) {
795                 u8 support = 0x01;
796
797                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
798                             sizeof(support), &support);
799         }
800
801         /* Set Suggested Default Data Length to maximum if supported */
802         if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
803                 struct hci_cp_le_write_def_data_len cp;
804
805                 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
806                 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
807                 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
808         }
809
810         /* Set Default PHY parameters if command is supported */
811         if (hdev->commands[35] & 0x20) {
812                 struct hci_cp_le_set_default_phy cp;
813
814                 /* No transmitter PHY or receiver PHY preferences */
815                 cp.all_phys = 0x03;
816                 cp.tx_phys = 0;
817                 cp.rx_phys = 0;
818
819                 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
820         }
821
822         return 0;
823 }
824
825 static int __hci_init(struct hci_dev *hdev)
826 {
827         int err;
828
829         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
830         if (err < 0)
831                 return err;
832
833         if (hci_dev_test_flag(hdev, HCI_SETUP))
834                 hci_debugfs_create_basic(hdev);
835
836         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
837         if (err < 0)
838                 return err;
839
840         /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
841          * dual-mode BR/EDR/LE controllers. AMP controllers only need the
842          * first two stages of init.
843          */
844         if (hdev->dev_type != HCI_PRIMARY)
845                 return 0;
846
847         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
848         if (err < 0)
849                 return err;
850
851         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
852         if (err < 0)
853                 return err;
854
855         /* This function is only called when the controller is actually in
856          * configured state. When the controller is marked as unconfigured,
857          * this initialization procedure is not run.
858          *
859          * It means that it is possible that a controller runs through its
860          * setup phase and then discovers missing settings. If that is the
861          * case, then this function will not be called. It then will only
862          * be called during the config phase.
863          *
864          * So only when in setup phase or config phase, create the debugfs
865          * entries and register the SMP channels.
866          */
867         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
868             !hci_dev_test_flag(hdev, HCI_CONFIG))
869                 return 0;
870
871         hci_debugfs_create_common(hdev);
872
873         if (lmp_bredr_capable(hdev))
874                 hci_debugfs_create_bredr(hdev);
875
876         if (lmp_le_capable(hdev))
877                 hci_debugfs_create_le(hdev);
878
879         return 0;
880 }
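/* Note the staged structure above: init1 resets the controller and
 * reads basic information, init2 discovers capabilities, init3
 * programs the event masks based on those capabilities, and init4
 * enables optional features. Each stage runs synchronously, since
 * later stages depend on responses gathered by earlier ones.
 */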
881
882 static int hci_init0_req(struct hci_request *req, unsigned long opt)
883 {
884         struct hci_dev *hdev = req->hdev;
885
886         BT_DBG("%s %ld", hdev->name, opt);
887
888         /* Reset */
889         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
890                 hci_reset_req(req, 0);
891
892         /* Read Local Version */
893         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
894
895         /* Read BD Address */
896         if (hdev->set_bdaddr)
897                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
898
899         return 0;
900 }
901
902 static int __hci_unconf_init(struct hci_dev *hdev)
903 {
904         int err;
905
906         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
907                 return 0;
908
909         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
910         if (err < 0)
911                 return err;
912
913         if (hci_dev_test_flag(hdev, HCI_SETUP))
914                 hci_debugfs_create_basic(hdev);
915
916         return 0;
917 }
918
919 static int hci_scan_req(struct hci_request *req, unsigned long opt)
920 {
921         __u8 scan = opt;
922
923         BT_DBG("%s %x", req->hdev->name, scan);
924
925         /* Inquiry and Page scans */
926         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
927         return 0;
928 }
929
930 static int hci_auth_req(struct hci_request *req, unsigned long opt)
931 {
932         __u8 auth = opt;
933
934         BT_DBG("%s %x", req->hdev->name, auth);
935
936         /* Authentication */
937         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
938         return 0;
939 }
940
941 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
942 {
943         __u8 encrypt = opt;
944
945         BT_DBG("%s %x", req->hdev->name, encrypt);
946
947         /* Encryption */
948         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
949         return 0;
950 }
951
952 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
953 {
954         __le16 policy = cpu_to_le16(opt);
955
956         BT_DBG("%s %x", req->hdev->name, policy);
957
958         /* Default link policy */
959         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
960         return 0;
961 }
962
963 /* Get HCI device by index.
964  * Device is held on return. */
965 struct hci_dev *hci_dev_get(int index)
966 {
967         struct hci_dev *hdev = NULL, *d;
968
969         BT_DBG("%d", index);
970
971         if (index < 0)
972                 return NULL;
973
974         read_lock(&hci_dev_list_lock);
975         list_for_each_entry(d, &hci_dev_list, list) {
976                 if (d->id == index) {
977                         hdev = hci_dev_hold(d);
978                         break;
979                 }
980         }
981         read_unlock(&hci_dev_list_lock);
982         return hdev;
983 }
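/* Caller sketch: every successful hci_dev_get() must be balanced with
 * a matching hci_dev_put() once the device is no longer needed, e.g.:
 *
 *   struct hci_dev *hdev = hci_dev_get(0);
 *   if (hdev) {
 *           ... use hdev ...
 *           hci_dev_put(hdev);
 *   }
 */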
984
985 /* ---- Inquiry support ---- */
986
987 bool hci_discovery_active(struct hci_dev *hdev)
988 {
989         struct discovery_state *discov = &hdev->discovery;
990
991         switch (discov->state) {
992         case DISCOVERY_FINDING:
993         case DISCOVERY_RESOLVING:
994                 return true;
995
996         default:
997                 return false;
998         }
999 }
1000
1001 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1002 {
1003         int old_state = hdev->discovery.state;
1004
1005         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1006
1007         if (old_state == state)
1008                 return;
1009
1010         hdev->discovery.state = state;
1011
1012         switch (state) {
1013         case DISCOVERY_STOPPED:
1014                 hci_update_background_scan(hdev);
1015
1016                 if (old_state != DISCOVERY_STARTING)
1017                         mgmt_discovering(hdev, 0);
1018                 break;
1019         case DISCOVERY_STARTING:
1020                 break;
1021         case DISCOVERY_FINDING:
1022                 mgmt_discovering(hdev, 1);
1023                 break;
1024         case DISCOVERY_RESOLVING:
1025                 break;
1026         case DISCOVERY_STOPPING:
1027                 break;
1028         }
1029 }
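/* The usual transition sequence is STOPPED -> STARTING -> FINDING ->
 * (optionally RESOLVING) -> STOPPING -> STOPPED; as the switch above
 * shows, user space is only notified via mgmt_discovering() when
 * discovery effectively starts (FINDING) or stops (STOPPED).
 */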
1030
1031 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1032 {
1033         struct discovery_state *cache = &hdev->discovery;
1034         struct inquiry_entry *p, *n;
1035
1036         list_for_each_entry_safe(p, n, &cache->all, all) {
1037                 list_del(&p->all);
1038                 kfree(p);
1039         }
1040
1041         INIT_LIST_HEAD(&cache->unknown);
1042         INIT_LIST_HEAD(&cache->resolve);
1043 }
1044
1045 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1046                                                bdaddr_t *bdaddr)
1047 {
1048         struct discovery_state *cache = &hdev->discovery;
1049         struct inquiry_entry *e;
1050
1051         BT_DBG("cache %p, %pMR", cache, bdaddr);
1052
1053         list_for_each_entry(e, &cache->all, all) {
1054                 if (!bacmp(&e->data.bdaddr, bdaddr))
1055                         return e;
1056         }
1057
1058         return NULL;
1059 }
1060
1061 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1062                                                        bdaddr_t *bdaddr)
1063 {
1064         struct discovery_state *cache = &hdev->discovery;
1065         struct inquiry_entry *e;
1066
1067         BT_DBG("cache %p, %pMR", cache, bdaddr);
1068
1069         list_for_each_entry(e, &cache->unknown, list) {
1070                 if (!bacmp(&e->data.bdaddr, bdaddr))
1071                         return e;
1072         }
1073
1074         return NULL;
1075 }
1076
1077 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1078                                                        bdaddr_t *bdaddr,
1079                                                        int state)
1080 {
1081         struct discovery_state *cache = &hdev->discovery;
1082         struct inquiry_entry *e;
1083
1084         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1085
1086         list_for_each_entry(e, &cache->resolve, list) {
1087                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1088                         return e;
1089                 if (!bacmp(&e->data.bdaddr, bdaddr))
1090                         return e;
1091         }
1092
1093         return NULL;
1094 }
1095
1096 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1097                                       struct inquiry_entry *ie)
1098 {
1099         struct discovery_state *cache = &hdev->discovery;
1100         struct list_head *pos = &cache->resolve;
1101         struct inquiry_entry *p;
1102
1103         list_del(&ie->list);
1104
1105         list_for_each_entry(p, &cache->resolve, list) {
1106                 if (p->name_state != NAME_PENDING &&
1107                     abs(p->data.rssi) >= abs(ie->data.rssi))
1108                         break;
1109                 pos = &p->list;
1110         }
1111
1112         list_add(&ie->list, pos);
1113 }
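/* The walk above keeps the resolve list ordered by ascending abs(rssi):
 * the entry is reinserted before the first non-pending entry whose
 * abs(rssi) is greater or equal, so devices with the strongest signal
 * (RSSI closest to zero) get their names resolved first.
 */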
1114
1115 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1116                              bool name_known)
1117 {
1118         struct discovery_state *cache = &hdev->discovery;
1119         struct inquiry_entry *ie;
1120         u32 flags = 0;
1121
1122         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1123
1124         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1125
1126         if (!data->ssp_mode)
1127                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1128
1129         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1130         if (ie) {
1131                 if (!ie->data.ssp_mode)
1132                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1133
1134                 if (ie->name_state == NAME_NEEDED &&
1135                     data->rssi != ie->data.rssi) {
1136                         ie->data.rssi = data->rssi;
1137                         hci_inquiry_cache_update_resolve(hdev, ie);
1138                 }
1139
1140                 goto update;
1141         }
1142
1143         /* Entry not in the cache. Add new one. */
1144         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1145         if (!ie) {
1146                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1147                 goto done;
1148         }
1149
1150         list_add(&ie->all, &cache->all);
1151
1152         if (name_known) {
1153                 ie->name_state = NAME_KNOWN;
1154         } else {
1155                 ie->name_state = NAME_NOT_KNOWN;
1156                 list_add(&ie->list, &cache->unknown);
1157         }
1158
1159 update:
1160         if (name_known && ie->name_state != NAME_KNOWN &&
1161             ie->name_state != NAME_PENDING) {
1162                 ie->name_state = NAME_KNOWN;
1163                 list_del(&ie->list);
1164         }
1165
1166         memcpy(&ie->data, data, sizeof(*data));
1167         ie->timestamp = jiffies;
1168         cache->timestamp = jiffies;
1169
1170         if (ie->name_state == NAME_NOT_KNOWN)
1171                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1172
1173 done:
1174         return flags;
1175 }
1176
1177 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1178 {
1179         struct discovery_state *cache = &hdev->discovery;
1180         struct inquiry_info *info = (struct inquiry_info *) buf;
1181         struct inquiry_entry *e;
1182         int copied = 0;
1183
1184         list_for_each_entry(e, &cache->all, all) {
1185                 struct inquiry_data *data = &e->data;
1186
1187                 if (copied >= num)
1188                         break;
1189
1190                 bacpy(&info->bdaddr, &data->bdaddr);
1191                 info->pscan_rep_mode    = data->pscan_rep_mode;
1192                 info->pscan_period_mode = data->pscan_period_mode;
1193                 info->pscan_mode        = data->pscan_mode;
1194                 memcpy(info->dev_class, data->dev_class, 3);
1195                 info->clock_offset      = data->clock_offset;
1196
1197                 info++;
1198                 copied++;
1199         }
1200
1201         BT_DBG("cache %p, copied %d", cache, copied);
1202         return copied;
1203 }
1204
1205 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1206 {
1207         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1208         struct hci_dev *hdev = req->hdev;
1209         struct hci_cp_inquiry cp;
1210
1211         BT_DBG("%s", hdev->name);
1212
1213         if (test_bit(HCI_INQUIRY, &hdev->flags))
1214                 return 0;
1215
1216         /* Start Inquiry */
1217         memcpy(&cp.lap, &ir->lap, 3);
1218         cp.length  = ir->length;
1219         cp.num_rsp = ir->num_rsp;
1220         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1221
1222         return 0;
1223 }
1224
1225 int hci_inquiry(void __user *arg)
1226 {
1227         __u8 __user *ptr = arg;
1228         struct hci_inquiry_req ir;
1229         struct hci_dev *hdev;
1230         int err = 0, do_inquiry = 0, max_rsp;
1231         long timeo;
1232         __u8 *buf;
1233
1234         if (copy_from_user(&ir, ptr, sizeof(ir)))
1235                 return -EFAULT;
1236
1237         hdev = hci_dev_get(ir.dev_id);
1238         if (!hdev)
1239                 return -ENODEV;
1240
1241         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1242                 err = -EBUSY;
1243                 goto done;
1244         }
1245
1246         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1247                 err = -EOPNOTSUPP;
1248                 goto done;
1249         }
1250
1251         if (hdev->dev_type != HCI_PRIMARY) {
1252                 err = -EOPNOTSUPP;
1253                 goto done;
1254         }
1255
1256         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1257                 err = -EOPNOTSUPP;
1258                 goto done;
1259         }
1260
1261         /* Restrict maximum inquiry length to 60 seconds */
1262         if (ir.length > 60) {
1263                 err = -EINVAL;
1264                 goto done;
1265         }
1266
1267         hci_dev_lock(hdev);
1268         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1269             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1270                 hci_inquiry_cache_flush(hdev);
1271                 do_inquiry = 1;
1272         }
1273         hci_dev_unlock(hdev);
1274
1275         timeo = ir.length * msecs_to_jiffies(2000);
1276
1277         if (do_inquiry) {
1278                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1279                                    timeo, NULL);
1280                 if (err < 0)
1281                         goto done;
1282
1283                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1284                  * cleared). If it is interrupted by a signal, return -EINTR.
1285                  */
1286                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1287                                 TASK_INTERRUPTIBLE)) {
1288                         err = -EINTR;
1289                         goto done;
1290                 }
1291         }
1292
1293         /* For an unlimited number of responses, use a buffer with
1294          * 255 entries.
1295          */
1296         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1297
1298         /* cache_dump can't sleep. Therefore we allocate a temporary buffer
1299          * and then copy it to user space.
1300          */
1301         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1302         if (!buf) {
1303                 err = -ENOMEM;
1304                 goto done;
1305         }
1306
1307         hci_dev_lock(hdev);
1308         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1309         hci_dev_unlock(hdev);
1310
1311         BT_DBG("num_rsp %d", ir.num_rsp);
1312
1313         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1314                 ptr += sizeof(ir);
1315                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1316                                  ir.num_rsp))
1317                         err = -EFAULT;
1318         } else
1319                 err = -EFAULT;
1320
1321         kfree(buf);
1322
1323 done:
1324         hci_dev_put(hdev);
1325         return err;
1326 }
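/* User-space sketch (hypothetical hci_sock_fd; assumes a raw HCI socket
 * and the HCIINQUIRY ioctl from hci_sock, error handling omitted):
 *
 *   struct {
 *           struct hci_inquiry_req ir;
 *           struct inquiry_info info[8];
 *   } buf = {
 *           .ir = {
 *                   .dev_id  = 0,
 *                   .lap     = { 0x33, 0x8b, 0x9e },  // GIAC
 *                   .length  = 8,                     // 8 * 1.28s
 *                   .num_rsp = 8,
 *           },
 *   };
 *
 *   ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */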
1327
1328 static int hci_dev_do_open(struct hci_dev *hdev)
1329 {
1330         int ret = 0;
1331
1332         BT_DBG("%s %p", hdev->name, hdev);
1333
1334         hci_req_sync_lock(hdev);
1335
1336         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1337                 ret = -ENODEV;
1338                 goto done;
1339         }
1340
1341         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1342             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1343                 /* Check for rfkill but allow the HCI setup stage to
1344                  * proceed (which in itself doesn't cause any RF activity).
1345                  */
1346                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1347                         ret = -ERFKILL;
1348                         goto done;
1349                 }
1350
1351                 /* Check for valid public address or a configured static
1352                  * random address, but let the HCI setup proceed to
1353                  * be able to determine if there is a public address
1354                  * or not.
1355                  *
1356                  * In case of user channel usage, it is not important
1357                  * if a public address or static random address is
1358                  * available.
1359                  *
1360                  * This check is only valid for BR/EDR controllers
1361                  * since AMP controllers do not have an address.
1362                  */
1363                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1364                     hdev->dev_type == HCI_PRIMARY &&
1365                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1366                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1367                         ret = -EADDRNOTAVAIL;
1368                         goto done;
1369                 }
1370         }
1371
1372         if (test_bit(HCI_UP, &hdev->flags)) {
1373                 ret = -EALREADY;
1374                 goto done;
1375         }
1376
1377         if (hdev->open(hdev)) {
1378                 ret = -EIO;
1379                 goto done;
1380         }
1381
1382         set_bit(HCI_RUNNING, &hdev->flags);
1383         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1384
1385         atomic_set(&hdev->cmd_cnt, 1);
1386         set_bit(HCI_INIT, &hdev->flags);
1387
1388         if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1389                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1390
1391                 if (hdev->setup)
1392                         ret = hdev->setup(hdev);
1393
1394                 /* The transport driver can set these quirks before
1395                  * creating the HCI device or in its setup callback.
1396                  *
1397                  * In case any of them is set, the controller has to
1398                  * start up as unconfigured.
1399                  */
1400                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1401                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1402                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1403
1404                 /* For an unconfigured controller it is required to
1405                  * read at least the version information provided by
1406                  * the Read Local Version Information command.
1407                  *
1408                  * If the set_bdaddr driver callback is provided, then
1409                  * also the original Bluetooth public device address
1410                  * will be read using the Read BD Address command.
1411                  */
1412                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1413                         ret = __hci_unconf_init(hdev);
1414         }
1415
1416         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1417                 /* If public address change is configured, ensure that
1418                  * the address gets programmed. If the driver does not
1419                  * support changing the public address, fail the power
1420                  * on procedure.
1421                  */
1422                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1423                     hdev->set_bdaddr)
1424                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1425                 else
1426                         ret = -EADDRNOTAVAIL;
1427         }
1428
1429         if (!ret) {
1430                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1431                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1432                         ret = __hci_init(hdev);
1433                         if (!ret && hdev->post_init)
1434                                 ret = hdev->post_init(hdev);
1435                 }
1436         }
1437
1438         /* If the HCI Reset command is clearing all diagnostic settings,
1439          * then they need to be reprogrammed after the init procedure
1440          * has completed.
1441          */
1442         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1443             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1444             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1445                 ret = hdev->set_diag(hdev, true);
1446
1447         clear_bit(HCI_INIT, &hdev->flags);
1448
1449         if (!ret) {
1450                 hci_dev_hold(hdev);
1451                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1452                 set_bit(HCI_UP, &hdev->flags);
1453                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1454                 hci_leds_update_powered(hdev, true);
1455                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1456                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1457                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1458                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1459                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1460                     hdev->dev_type == HCI_PRIMARY) {
1461                         ret = __hci_req_hci_power_on(hdev);
1462                         mgmt_power_on(hdev, ret);
1463                 }
1464         } else {
1465                 /* Init failed, cleanup */
1466                 flush_work(&hdev->tx_work);
1467
1468                 /* Since hci_rx_work() may queue new cmd_work,
1469                  * it should be flushed first to avoid an unexpected call
1470                  * of hci_cmd_work().
1471                  */
1472                 flush_work(&hdev->rx_work);
1473                 flush_work(&hdev->cmd_work);
1474
1475                 skb_queue_purge(&hdev->cmd_q);
1476                 skb_queue_purge(&hdev->rx_q);
1477
1478                 if (hdev->flush)
1479                         hdev->flush(hdev);
1480
1481                 if (hdev->sent_cmd) {
1482                         kfree_skb(hdev->sent_cmd);
1483                         hdev->sent_cmd = NULL;
1484                 }
1485
1486                 clear_bit(HCI_RUNNING, &hdev->flags);
1487                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1488
1489                 hdev->close(hdev);
1490                 hdev->flags &= BIT(HCI_RAW);
1491         }
1492
1493 done:
1494         hci_req_sync_unlock(hdev);
1495         return ret;
1496 }
1497
1498 /* ---- HCI ioctl helpers ---- */
1499
1500 int hci_dev_open(__u16 dev)
1501 {
1502         struct hci_dev *hdev;
1503         int err;
1504
1505         hdev = hci_dev_get(dev);
1506         if (!hdev)
1507                 return -ENODEV;
1508
1509         /* Devices that are marked as unconfigured can only be powered
1510          * up as user channel. Trying to bring them up as normal devices
1511          * will result in a failure. Only user channel operation is
1512          * possible.
1513          *
1514          * When this function is called for a user channel, the flag
1515          * HCI_USER_CHANNEL will be set first before attempting to
1516          * open the device.
1517          */
1518         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1519             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1520                 err = -EOPNOTSUPP;
1521                 goto done;
1522         }
1523
1524         /* We need to ensure that no other power on/off work is pending
1525          * before proceeding to call hci_dev_do_open. This is
1526          * particularly important if the setup procedure has not yet
1527          * completed.
1528          */
1529         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1530                 cancel_delayed_work(&hdev->power_off);
1531
1532         /* After this call it is guaranteed that the setup procedure
1533          * has finished. This means that error conditions like RFKILL
1534          * or no valid public or static random address apply.
1535          */
1536         flush_workqueue(hdev->req_workqueue);
1537
1538         /* For controllers not using the management interface and that
1539          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1540          * so that pairing works for them. Once the management interface
1541          * is in use this bit will be cleared again and userspace has
1542          * to explicitly enable it.
1543          */
1544         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1545             !hci_dev_test_flag(hdev, HCI_MGMT))
1546                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1547
1548         err = hci_dev_do_open(hdev);
1549
1550 done:
1551         hci_dev_put(hdev);
1552         return err;
1553 }
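
/* Illustrative sketch, not part of this file: the legacy path into
 * hci_dev_open() is the HCIDEVUP ioctl on a raw HCI socket. A minimal
 * userspace caller, assuming the usual BlueZ headers, could look like:
 *
 *	#include <errno.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int bring_up(int dev_id)
 *	{
 *		int dd, err = 0;
 *
 *		dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *		if (dd < 0)
 *			return -errno;
 *
 *		// Lands in hci_dev_open(dev_id); EALREADY means already up
 *		if (ioctl(dd, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
 *			err = -errno;
 *
 *		close(dd);
 *		return err;
 *	}
 *
 * HCIDEVDOWN is the symmetric ioctl into hci_dev_close() below.
 */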
1554
1555 /* This function requires the caller holds hdev->lock */
1556 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1557 {
1558         struct hci_conn_params *p;
1559
1560         list_for_each_entry(p, &hdev->le_conn_params, list) {
1561                 if (p->conn) {
1562                         hci_conn_drop(p->conn);
1563                         hci_conn_put(p->conn);
1564                         p->conn = NULL;
1565                 }
1566                 list_del_init(&p->action);
1567         }
1568
1569         BT_DBG("All LE pending actions cleared");
1570 }
1571
1572 int hci_dev_do_close(struct hci_dev *hdev)
1573 {
1574         bool auto_off;
1575
1576         BT_DBG("%s %p", hdev->name, hdev);
1577
1586         cancel_delayed_work(&hdev->power_off);
1587
1588         hci_request_cancel_all(hdev);
1589         hci_req_sync_lock(hdev);
1590
1591         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1592             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1593             test_bit(HCI_UP, &hdev->flags)) {
1594                 /* Execute vendor specific shutdown routine */
1595                 if (hdev->shutdown)
1596                         hdev->shutdown(hdev);
1597         }
1598
1599         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1600                 cancel_delayed_work_sync(&hdev->cmd_timer);
1601                 hci_req_sync_unlock(hdev);
1602                 return 0;
1603         }
1604
1605         hci_leds_update_powered(hdev, false);
1606
1607         /* Flush RX and TX works */
1608         flush_work(&hdev->tx_work);
1609         flush_work(&hdev->rx_work);
1610
1611         if (hdev->discov_timeout > 0) {
1612                 hdev->discov_timeout = 0;
1613                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1614                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1615         }
1616
1617         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1618                 cancel_delayed_work(&hdev->service_cache);
1619
1620         if (hci_dev_test_flag(hdev, HCI_MGMT))
1621                 cancel_delayed_work_sync(&hdev->rpa_expired);
1622
1623         /* Avoid potential lockdep warnings from the *_flush() calls by
1624          * ensuring the workqueue is empty up front.
1625          */
1626         drain_workqueue(hdev->workqueue);
1627
1628         hci_dev_lock(hdev);
1629
1630         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1631
1632         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1633
1634         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1635             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1636             hci_dev_test_flag(hdev, HCI_MGMT))
1637                 __mgmt_power_off(hdev);
1638
1639         hci_inquiry_cache_flush(hdev);
1640         hci_pend_le_actions_clear(hdev);
1641         hci_conn_hash_flush(hdev);
1642         hci_dev_unlock(hdev);
1643
1644         smp_unregister(hdev);
1645
1646         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1647
1648         if (hdev->flush)
1649                 hdev->flush(hdev);
1650
1651         /* Reset device */
1652         skb_queue_purge(&hdev->cmd_q);
1653         atomic_set(&hdev->cmd_cnt, 1);
1654         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1655             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1656                 set_bit(HCI_INIT, &hdev->flags);
1657                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1658                 clear_bit(HCI_INIT, &hdev->flags);
1659         }
1660
1661         /* Flush cmd work */
1662         flush_work(&hdev->cmd_work);
1663
1664         /* Drop queues */
1665         skb_queue_purge(&hdev->rx_q);
1666         skb_queue_purge(&hdev->cmd_q);
1667         skb_queue_purge(&hdev->raw_q);
1668
1669         /* Drop last sent command */
1670         if (hdev->sent_cmd) {
1671                 cancel_delayed_work_sync(&hdev->cmd_timer);
1672                 kfree_skb(hdev->sent_cmd);
1673                 hdev->sent_cmd = NULL;
1674         }
1675
1676         clear_bit(HCI_RUNNING, &hdev->flags);
1677         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1678
1679         /* After this point our queues are empty
1680          * and no tasks are scheduled. */
1681         hdev->close(hdev);
1682
1683         /* Clear flags */
1684         hdev->flags &= BIT(HCI_RAW);
1685         hci_dev_clear_volatile_flags(hdev);
1686
1687         /* Controller radio is available but is currently powered down */
1688         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1689
1690         memset(hdev->eir, 0, sizeof(hdev->eir));
1691         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1692         bacpy(&hdev->random_addr, BDADDR_ANY);
1693
1694         hci_req_sync_unlock(hdev);
1695
1696         hci_dev_put(hdev);
1697         return 0;
1698 }
1699
1700 int hci_dev_close(__u16 dev)
1701 {
1702         struct hci_dev *hdev;
1703         int err;
1704
1705         hdev = hci_dev_get(dev);
1706         if (!hdev)
1707                 return -ENODEV;
1708
1709         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1710                 err = -EBUSY;
1711                 goto done;
1712         }
1713
1714         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1715                 cancel_delayed_work(&hdev->power_off);
1716
1717         err = hci_dev_do_close(hdev);
1718
1719 done:
1720         hci_dev_put(hdev);
1721         return err;
1722 }
1723
1724 static int hci_dev_do_reset(struct hci_dev *hdev)
1725 {
1726         int ret;
1727
1728         BT_DBG("%s %p", hdev->name, hdev);
1729
1730         hci_req_sync_lock(hdev);
1731
1732         /* Drop queues */
1733         skb_queue_purge(&hdev->rx_q);
1734         skb_queue_purge(&hdev->cmd_q);
1735
1736         /* Avoid potential lockdep warnings from the *_flush() calls by
1737          * ensuring the workqueue is empty up front.
1738          */
1739         drain_workqueue(hdev->workqueue);
1740
1741         hci_dev_lock(hdev);
1742         hci_inquiry_cache_flush(hdev);
1743         hci_conn_hash_flush(hdev);
1744         hci_dev_unlock(hdev);
1745
1746         if (hdev->flush)
1747                 hdev->flush(hdev);
1748
1749         atomic_set(&hdev->cmd_cnt, 1);
1750         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1751
1752         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1753
1754         hci_req_sync_unlock(hdev);
1755         return ret;
1756 }
1757
1758 int hci_dev_reset(__u16 dev)
1759 {
1760         struct hci_dev *hdev;
1761         int err;
1762
1763         hdev = hci_dev_get(dev);
1764         if (!hdev)
1765                 return -ENODEV;
1766
1767         if (!test_bit(HCI_UP, &hdev->flags)) {
1768                 err = -ENETDOWN;
1769                 goto done;
1770         }
1771
1772         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1773                 err = -EBUSY;
1774                 goto done;
1775         }
1776
1777         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1778                 err = -EOPNOTSUPP;
1779                 goto done;
1780         }
1781
1782         err = hci_dev_do_reset(hdev);
1783
1784 done:
1785         hci_dev_put(hdev);
1786         return err;
1787 }
1788
1789 int hci_dev_reset_stat(__u16 dev)
1790 {
1791         struct hci_dev *hdev;
1792         int ret = 0;
1793
1794         hdev = hci_dev_get(dev);
1795         if (!hdev)
1796                 return -ENODEV;
1797
1798         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1799                 ret = -EBUSY;
1800                 goto done;
1801         }
1802
1803         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1804                 ret = -EOPNOTSUPP;
1805                 goto done;
1806         }
1807
1808         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1809
1810 done:
1811         hci_dev_put(hdev);
1812         return ret;
1813 }
1814
1815 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1816 {
1817         bool conn_changed, discov_changed;
1818
1819         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1820
1821         if ((scan & SCAN_PAGE))
1822                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1823                                                           HCI_CONNECTABLE);
1824         else
1825                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1826                                                            HCI_CONNECTABLE);
1827
1828         if ((scan & SCAN_INQUIRY)) {
1829                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1830                                                             HCI_DISCOVERABLE);
1831         } else {
1832                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1833                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1834                                                              HCI_DISCOVERABLE);
1835         }
1836
1837         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1838                 return;
1839
1840         if (conn_changed || discov_changed) {
1841                 /* In case this was disabled through mgmt */
1842                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1843
1844                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1845                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1846
1847                 mgmt_new_settings(hdev);
1848         }
1849 }
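
/* For reference, the scan value mirrors the Write Scan Enable command
 * from the Bluetooth Core Specification:
 *
 *	SCAN_DISABLED             0x00   neither flag set
 *	SCAN_INQUIRY              0x01   HCI_DISCOVERABLE
 *	SCAN_PAGE                 0x02   HCI_CONNECTABLE
 *	SCAN_PAGE | SCAN_INQUIRY  0x03   both flags set
 *
 * so e.g. a legacy "hciconfig hci0 piscan" arrives here with scan set
 * to 0x03 and both flags end up set.
 */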
1850
1851 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1852 {
1853         struct hci_dev *hdev;
1854         struct hci_dev_req dr;
1855         int err = 0;
1856
1857         if (copy_from_user(&dr, arg, sizeof(dr)))
1858                 return -EFAULT;
1859
1860         hdev = hci_dev_get(dr.dev_id);
1861         if (!hdev)
1862                 return -ENODEV;
1863
1864         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1865                 err = -EBUSY;
1866                 goto done;
1867         }
1868
1869         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1870                 err = -EOPNOTSUPP;
1871                 goto done;
1872         }
1873
1874         if (hdev->dev_type != HCI_PRIMARY) {
1875                 err = -EOPNOTSUPP;
1876                 goto done;
1877         }
1878
1879         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1880                 err = -EOPNOTSUPP;
1881                 goto done;
1882         }
1883
1884         switch (cmd) {
1885         case HCISETAUTH:
1886                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1887                                    HCI_INIT_TIMEOUT, NULL);
1888                 break;
1889
1890         case HCISETENCRYPT:
1891                 if (!lmp_encrypt_capable(hdev)) {
1892                         err = -EOPNOTSUPP;
1893                         break;
1894                 }
1895
1896                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1897                         /* Auth must be enabled first */
1898                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1899                                            HCI_INIT_TIMEOUT, NULL);
1900                         if (err)
1901                                 break;
1902                 }
1903
1904                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1905                                    HCI_INIT_TIMEOUT, NULL);
1906                 break;
1907
1908         case HCISETSCAN:
1909                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1910                                    HCI_INIT_TIMEOUT, NULL);
1911
1912                 /* Ensure that the connectable and discoverable states
1913                  * get correctly modified as this was a non-mgmt change.
1914                  */
1915                 if (!err)
1916                         hci_update_scan_state(hdev, dr.dev_opt);
1917                 break;
1918
1919         case HCISETLINKPOL:
1920                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1921                                    HCI_INIT_TIMEOUT, NULL);
1922                 break;
1923
1924         case HCISETLINKMODE:
1925                 hdev->link_mode = ((__u16) dr.dev_opt) &
1926                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1927                 break;
1928
1929         case HCISETPTYPE:
1930                 hdev->pkt_type = (__u16) dr.dev_opt;
1931                 break;
1932
1933         case HCISETACLMTU:
1934                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1935                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1936                 break;
1937
1938         case HCISETSCOMTU:
1939                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1940                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1941                 break;
1942
1943         default:
1944                 err = -EINVAL;
1945                 break;
1946         }
1947
1948 done:
1949         hci_dev_put(hdev);
1950         return err;
1951 }
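
/* Illustrative sketch, not part of this file: the HCISET* ioctls
 * handled above take a struct hci_dev_req. Enabling page and inquiry
 * scan on hci0, assuming the usual BlueZ headers and a raw HCI socket
 * dd as in the hci_dev_open() sketch, could look like:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */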
1952
1953 int hci_get_dev_list(void __user *arg)
1954 {
1955         struct hci_dev *hdev;
1956         struct hci_dev_list_req *dl;
1957         struct hci_dev_req *dr;
1958         int n = 0, size, err;
1959         __u16 dev_num;
1960
1961         if (get_user(dev_num, (__u16 __user *) arg))
1962                 return -EFAULT;
1963
1964         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1965                 return -EINVAL;
1966
1967         size = sizeof(*dl) + dev_num * sizeof(*dr);
1968
1969         dl = kzalloc(size, GFP_KERNEL);
1970         if (!dl)
1971                 return -ENOMEM;
1972
1973         dr = dl->dev_req;
1974
1975         read_lock(&hci_dev_list_lock);
1976         list_for_each_entry(hdev, &hci_dev_list, list) {
1977                 unsigned long flags = hdev->flags;
1978
1979                 /* When auto-off is configured, the transport is
1980                  * actually running, but the device should still be
1981                  * reported as down.
1982                  */
1983                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1984                         flags &= ~BIT(HCI_UP);
1985
1986                 (dr + n)->dev_id  = hdev->id;
1987                 (dr + n)->dev_opt = flags;
1988
1989                 if (++n >= dev_num)
1990                         break;
1991         }
1992         read_unlock(&hci_dev_list_lock);
1993
1994         dl->dev_num = n;
1995         size = sizeof(*dl) + n * sizeof(*dr);
1996
1997         err = copy_to_user(arg, dl, size);
1998         kfree(dl);
1999
2000         return err ? -EFAULT : 0;
2001 }
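
/* Illustrative sketch, not part of this file: userspace enumerates
 * controllers by passing a counted buffer to HCIGETDEVLIST. Building
 * on the raw HCI socket dd from the earlier sketch:
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) +
 *		       HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	if (!dl)
 *		return -ENOMEM;
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, (void *) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       (unsigned int) dl->dev_req[i].dev_id,
 *			       (unsigned int) dl->dev_req[i].dev_opt);
 *	free(dl);
 */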
2002
2003 int hci_get_dev_info(void __user *arg)
2004 {
2005         struct hci_dev *hdev;
2006         struct hci_dev_info di;
2007         unsigned long flags;
2008         int err = 0;
2009
2010         if (copy_from_user(&di, arg, sizeof(di)))
2011                 return -EFAULT;
2012
2013         hdev = hci_dev_get(di.dev_id);
2014         if (!hdev)
2015                 return -ENODEV;
2016
2017         /* When auto-off is configured, the transport is
2018          * actually running, but the device should still be
2019          * reported as down.
2020          */
2021         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2022                 flags = hdev->flags & ~BIT(HCI_UP);
2023         else
2024                 flags = hdev->flags;
2025
2026         strcpy(di.name, hdev->name);
2027         di.bdaddr   = hdev->bdaddr;
2028         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2029         di.flags    = flags;
2030         di.pkt_type = hdev->pkt_type;
2031         if (lmp_bredr_capable(hdev)) {
2032                 di.acl_mtu  = hdev->acl_mtu;
2033                 di.acl_pkts = hdev->acl_pkts;
2034                 di.sco_mtu  = hdev->sco_mtu;
2035                 di.sco_pkts = hdev->sco_pkts;
2036         } else {
2037                 di.acl_mtu  = hdev->le_mtu;
2038                 di.acl_pkts = hdev->le_pkts;
2039                 di.sco_mtu  = 0;
2040                 di.sco_pkts = 0;
2041         }
2042         di.link_policy = hdev->link_policy;
2043         di.link_mode   = hdev->link_mode;
2044
2045         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2046         memcpy(&di.features, &hdev->features, sizeof(di.features));
2047
2048         if (copy_to_user(arg, &di, sizeof(di)))
2049                 err = -EFAULT;
2050
2051         hci_dev_put(hdev);
2052
2053         return err;
2054 }
2055
2056 /* ---- Interface to HCI drivers ---- */
2057
2058 static int hci_rfkill_set_block(void *data, bool blocked)
2059 {
2060         struct hci_dev *hdev = data;
2061
2062         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2063
2064         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2065                 return -EBUSY;
2066
2067         if (blocked) {
2068                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2069                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2070                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2071                         hci_dev_do_close(hdev);
2072         } else {
2073                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2074         }
2075
2076         return 0;
2077 }
2078
2079 static const struct rfkill_ops hci_rfkill_ops = {
2080         .set_block = hci_rfkill_set_block,
2081 };
2082
2083 static void hci_power_on(struct work_struct *work)
2084 {
2085         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2086         int err;
2087
2088         BT_DBG("%s", hdev->name);
2089
2090         if (test_bit(HCI_UP, &hdev->flags) &&
2091             hci_dev_test_flag(hdev, HCI_MGMT) &&
2092             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2093                 cancel_delayed_work(&hdev->power_off);
2094                 hci_req_sync_lock(hdev);
2095                 err = __hci_req_hci_power_on(hdev);
2096                 hci_req_sync_unlock(hdev);
2097                 mgmt_power_on(hdev, err);
2098                 return;
2099         }
2100
2101         err = hci_dev_do_open(hdev);
2102         if (err < 0) {
2103                 hci_dev_lock(hdev);
2104                 mgmt_set_powered_failed(hdev, err);
2105                 hci_dev_unlock(hdev);
2106                 return;
2107         }
2108
2109         /* During the HCI setup phase, a few error conditions are
2110          * ignored and they need to be checked now. If they are still
2111          * valid, it is important to turn the device back off.
2112          */
2113         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2114             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2115             (hdev->dev_type == HCI_PRIMARY &&
2116              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2117              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2118                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2119                 hci_dev_do_close(hdev);
2120         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2121                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2122                                    HCI_AUTO_OFF_TIMEOUT);
2123         }
2124
2125         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2126                 /* For unconfigured devices, set the HCI_RAW flag
2127                  * so that userspace can easily identify them.
2128                  */
2129                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2130                         set_bit(HCI_RAW, &hdev->flags);
2131
2132                 /* For fully configured devices, this will send
2133                  * the Index Added event. For unconfigured devices,
2134                  * it will send the Unconfigured Index Added event.
2135                  *
2136                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2137                  * and no event will be sent.
2138                  */
2139                 mgmt_index_added(hdev);
2140         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2141                 /* Now that the controller is configured, it
2142                  * is important to clear the HCI_RAW flag.
2143                  */
2144                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2145                         clear_bit(HCI_RAW, &hdev->flags);
2146
2147                 /* Powering on the controller with HCI_CONFIG set only
2148                  * happens with the transition from unconfigured to
2149                  * configured. This will send the Index Added event.
2150                  */
2151                 mgmt_index_added(hdev);
2152         }
2153 }
2154
2155 static void hci_power_off(struct work_struct *work)
2156 {
2157         struct hci_dev *hdev = container_of(work, struct hci_dev,
2158                                             power_off.work);
2159
2160         BT_DBG("%s", hdev->name);
2161
2162         hci_dev_do_close(hdev);
2163 }
2164
2165 static void hci_error_reset(struct work_struct *work)
2166 {
2167         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2168
2169         BT_DBG("%s", hdev->name);
2170
2171         if (hdev->hw_error)
2172                 hdev->hw_error(hdev, hdev->hw_error_code);
2173         else
2174                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2175                        hdev->hw_error_code);
2176
2177         if (hci_dev_do_close(hdev))
2178                 return;
2179
2180         hci_dev_do_open(hdev);
2181 }
2182
2183 void hci_uuids_clear(struct hci_dev *hdev)
2184 {
2185         struct bt_uuid *uuid, *tmp;
2186
2187         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2188                 list_del(&uuid->list);
2189                 kfree(uuid);
2190         }
2191 }
2192
2193 void hci_link_keys_clear(struct hci_dev *hdev)
2194 {
2195         struct link_key *key;
2196
2197         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2198                 list_del_rcu(&key->list);
2199                 kfree_rcu(key, rcu);
2200         }
2201 }
2202
2203 void hci_smp_ltks_clear(struct hci_dev *hdev)
2204 {
2205         struct smp_ltk *k;
2206
2207         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2208                 list_del_rcu(&k->list);
2209                 kfree_rcu(k, rcu);
2210         }
2211 }
2212
2213 void hci_smp_irks_clear(struct hci_dev *hdev)
2214 {
2215         struct smp_irk *k;
2216
2217         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2218                 list_del_rcu(&k->list);
2219                 kfree_rcu(k, rcu);
2220         }
2221 }
2222
2223 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2224 {
2225         struct link_key *k;
2226
2227         rcu_read_lock();
2228         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2229                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2230                         rcu_read_unlock();
2231                         return k;
2232                 }
2233         }
2234         rcu_read_unlock();
2235
2236         return NULL;
2237 }
2238
2239 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2240                                u8 key_type, u8 old_key_type)
2241 {
2242         /* Legacy key */
2243         if (key_type < 0x03)
2244                 return true;
2245
2246         /* Debug keys are insecure so don't store them persistently */
2247         if (key_type == HCI_LK_DEBUG_COMBINATION)
2248                 return false;
2249
2250         /* Changed combination key and there's no previous one */
2251         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2252                 return false;
2253
2254         /* Security mode 3 case */
2255         if (!conn)
2256                 return true;
2257
2258         /* BR/EDR key derived using SC from an LE link */
2259         if (conn->type == LE_LINK)
2260                 return true;
2261
2262         /* Neither local nor remote side had no-bonding as requirement */
2263         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2264                 return true;
2265
2266         /* Local side had dedicated bonding as requirement */
2267         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2268                 return true;
2269
2270         /* Remote side had dedicated bonding as requirement */
2271         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2272                 return true;
2273
2274         /* If none of the above criteria match, then don't store the key
2275          * persistently */
2276         return false;
2277 }
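
/* Worked example for the checks above, using the pairing auth_type
 * encoding from the Core Specification (0x00/0x01 no bonding,
 * 0x02/0x03 dedicated bonding, 0x04/0x05 general bonding): an
 * unauthenticated combination key (type 0x04) is stored when both
 * sides asked for some form of bonding, e.g. auth_type 0x04 and
 * remote_auth 0x05, but is treated as non-persistent when one side
 * asked for no bonding and neither side asked for dedicated bonding.
 */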
2278
2279 static u8 ltk_role(u8 type)
2280 {
2281         if (type == SMP_LTK)
2282                 return HCI_ROLE_MASTER;
2283
2284         return HCI_ROLE_SLAVE;
2285 }
2286
2287 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2288                              u8 addr_type, u8 role)
2289 {
2290         struct smp_ltk *k;
2291
2292         rcu_read_lock();
2293         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2294                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2295                         continue;
2296
2297                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2298                         rcu_read_unlock();
2299                         return k;
2300                 }
2301         }
2302         rcu_read_unlock();
2303
2304         return NULL;
2305 }
2306
2307 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2308 {
2309         struct smp_irk *irk;
2310
2311         rcu_read_lock();
2312         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2313                 if (!bacmp(&irk->rpa, rpa)) {
2314                         rcu_read_unlock();
2315                         return irk;
2316                 }
2317         }
2318
2319         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2320                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2321                         bacpy(&irk->rpa, rpa);
2322                         rcu_read_unlock();
2323                         return irk;
2324                 }
2325         }
2326         rcu_read_unlock();
2327
2328         return NULL;
2329 }
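
/* Background for the lookup above: a Resolvable Private Address
 * consists of a 24-bit random part prand, whose two most significant
 * bits are 0b01, and a 24-bit hash:
 *
 *	RPA = prand || ah(IRK, prand)
 *
 * where ah() is the address hash function from the Core Specification
 * (AES-128 of the zero-padded prand, truncated to 24 bits). The second
 * loop recomputes ah() against every stored IRK via smp_irk_matches()
 * and caches a hit in irk->rpa so the cheap first loop matches next
 * time.
 */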
2330
2331 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2332                                      u8 addr_type)
2333 {
2334         struct smp_irk *irk;
2335
2336         /* Identity Address must be public or static random */
2337         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2338                 return NULL;
2339
2340         rcu_read_lock();
2341         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2342                 if (addr_type == irk->addr_type &&
2343                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2344                         rcu_read_unlock();
2345                         return irk;
2346                 }
2347         }
2348         rcu_read_unlock();
2349
2350         return NULL;
2351 }
2352
2353 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2354                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2355                                   u8 pin_len, bool *persistent)
2356 {
2357         struct link_key *key, *old_key;
2358         u8 old_key_type;
2359
2360         old_key = hci_find_link_key(hdev, bdaddr);
2361         if (old_key) {
2362                 old_key_type = old_key->type;
2363                 key = old_key;
2364         } else {
2365                 old_key_type = conn ? conn->key_type : 0xff;
2366                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2367                 if (!key)
2368                         return NULL;
2369                 list_add_rcu(&key->list, &hdev->link_keys);
2370         }
2371
2372         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2373
2374         /* Some buggy controller combinations generate a changed
2375          * combination key for legacy pairing even when there's no
2376          * previous key */
2377         if (type == HCI_LK_CHANGED_COMBINATION &&
2378             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2379                 type = HCI_LK_COMBINATION;
2380                 if (conn)
2381                         conn->key_type = type;
2382         }
2383
2384         bacpy(&key->bdaddr, bdaddr);
2385         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2386         key->pin_len = pin_len;
2387
2388         if (type == HCI_LK_CHANGED_COMBINATION)
2389                 key->type = old_key_type;
2390         else
2391                 key->type = type;
2392
2393         if (persistent)
2394                 *persistent = hci_persistent_key(hdev, conn, type,
2395                                                  old_key_type);
2396
2397         return key;
2398 }
2399
2400 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401                             u8 addr_type, u8 type, u8 authenticated,
2402                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2403 {
2404         struct smp_ltk *key, *old_key;
2405         u8 role = ltk_role(type);
2406
2407         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2408         if (old_key)
2409                 key = old_key;
2410         else {
2411                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2412                 if (!key)
2413                         return NULL;
2414                 list_add_rcu(&key->list, &hdev->long_term_keys);
2415         }
2416
2417         bacpy(&key->bdaddr, bdaddr);
2418         key->bdaddr_type = addr_type;
2419         memcpy(key->val, tk, sizeof(key->val));
2420         key->authenticated = authenticated;
2421         key->ediv = ediv;
2422         key->rand = rand;
2423         key->enc_size = enc_size;
2424         key->type = type;
2425
2426         return key;
2427 }
2428
2429 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2430                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2431 {
2432         struct smp_irk *irk;
2433
2434         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2435         if (!irk) {
2436                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2437                 if (!irk)
2438                         return NULL;
2439
2440                 bacpy(&irk->bdaddr, bdaddr);
2441                 irk->addr_type = addr_type;
2442
2443                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2444         }
2445
2446         memcpy(irk->val, val, 16);
2447         bacpy(&irk->rpa, rpa);
2448
2449         return irk;
2450 }
2451
2452 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2453 {
2454         struct link_key *key;
2455
2456         key = hci_find_link_key(hdev, bdaddr);
2457         if (!key)
2458                 return -ENOENT;
2459
2460         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2461
2462         list_del_rcu(&key->list);
2463         kfree_rcu(key, rcu);
2464
2465         return 0;
2466 }
2467
2468 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2469 {
2470         struct smp_ltk *k;
2471         int removed = 0;
2472
2473         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2474                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2475                         continue;
2476
2477                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2478
2479                 list_del_rcu(&k->list);
2480                 kfree_rcu(k, rcu);
2481                 removed++;
2482         }
2483
2484         return removed ? 0 : -ENOENT;
2485 }
2486
2487 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2488 {
2489         struct smp_irk *k;
2490
2491         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2492                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2493                         continue;
2494
2495                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2496
2497                 list_del_rcu(&k->list);
2498                 kfree_rcu(k, rcu);
2499         }
2500 }
2501
2502 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2503 {
2504         struct smp_ltk *k;
2505         struct smp_irk *irk;
2506         u8 addr_type;
2507
2508         if (type == BDADDR_BREDR) {
2509                 if (hci_find_link_key(hdev, bdaddr))
2510                         return true;
2511                 return false;
2512         }
2513
2514         /* Convert to HCI addr type which struct smp_ltk uses */
2515         if (type == BDADDR_LE_PUBLIC)
2516                 addr_type = ADDR_LE_DEV_PUBLIC;
2517         else
2518                 addr_type = ADDR_LE_DEV_RANDOM;
2519
2520         irk = hci_get_irk(hdev, bdaddr, addr_type);
2521         if (irk) {
2522                 bdaddr = &irk->bdaddr;
2523                 addr_type = irk->addr_type;
2524         }
2525
2526         rcu_read_lock();
2527         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2528                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2529                         rcu_read_unlock();
2530                         return true;
2531                 }
2532         }
2533         rcu_read_unlock();
2534
2535         return false;
2536 }
2537
2538 /* HCI command timer function */
2539 static void hci_cmd_timeout(struct work_struct *work)
2540 {
2541         struct hci_dev *hdev = container_of(work, struct hci_dev,
2542                                             cmd_timer.work);
2543
2544         if (hdev->sent_cmd) {
2545                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2546                 u16 opcode = __le16_to_cpu(sent->opcode);
2547
2548                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2549         } else {
2550                 BT_ERR("%s command tx timeout", hdev->name);
2551         }
2552
2553         atomic_set(&hdev->cmd_cnt, 1);
2554         queue_work(hdev->workqueue, &hdev->cmd_work);
2555 }
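
/* Note on the recovery above: cmd_cnt mirrors the controller's
 * Num_HCI_Command_Packets flow-control window and is normally
 * replenished by Command Complete/Status events. Forcing it back to 1
 * after a timeout lets hci_cmd_work() submit the next queued command
 * even though the acknowledgement for the stuck one never arrived.
 */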
2556
2557 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2558                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2559 {
2560         struct oob_data *data;
2561
2562         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2563                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2564                         continue;
2565                 if (data->bdaddr_type != bdaddr_type)
2566                         continue;
2567                 return data;
2568         }
2569
2570         return NULL;
2571 }
2572
2573 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2574                                u8 bdaddr_type)
2575 {
2576         struct oob_data *data;
2577
2578         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2579         if (!data)
2580                 return -ENOENT;
2581
2582         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2583
2584         list_del(&data->list);
2585         kfree(data);
2586
2587         return 0;
2588 }
2589
2590 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2591 {
2592         struct oob_data *data, *n;
2593
2594         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2595                 list_del(&data->list);
2596                 kfree(data);
2597         }
2598 }
2599
2600 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2601                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2602                             u8 *hash256, u8 *rand256)
2603 {
2604         struct oob_data *data;
2605
2606         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2607         if (!data) {
2608                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2609                 if (!data)
2610                         return -ENOMEM;
2611
2612                 bacpy(&data->bdaddr, bdaddr);
2613                 data->bdaddr_type = bdaddr_type;
2614                 list_add(&data->list, &hdev->remote_oob_data);
2615         }
2616
2617         if (hash192 && rand192) {
2618                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2619                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2620                 if (hash256 && rand256)
2621                         data->present = 0x03;
2622         } else {
2623                 memset(data->hash192, 0, sizeof(data->hash192));
2624                 memset(data->rand192, 0, sizeof(data->rand192));
2625                 if (hash256 && rand256)
2626                         data->present = 0x02;
2627                 else
2628                         data->present = 0x00;
2629         }
2630
2631         if (hash256 && rand256) {
2632                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2633                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2634         } else {
2635                 memset(data->hash256, 0, sizeof(data->hash256));
2636                 memset(data->rand256, 0, sizeof(data->rand256));
2637                 if (hash192 && rand192)
2638                         data->present = 0x01;
2639         }
2640
2641         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2642
2643         return 0;
2644 }
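
/* The data->present value maintained above is a bitmask describing
 * which OOB blocks are valid: bit 0 (0x01) covers the P-192
 * hash/randomizer pair and bit 1 (0x02) the P-256 pair, so 0x03 means
 * both were provided.
 */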
2645
2646 /* This function requires the caller holds hdev->lock */
2647 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2648 {
2649         struct adv_info *adv_instance;
2650
2651         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2652                 if (adv_instance->instance == instance)
2653                         return adv_instance;
2654         }
2655
2656         return NULL;
2657 }
2658
2659 /* This function requires the caller holds hdev->lock */
2660 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2661 {
2662         struct adv_info *cur_instance;
2663
2664         cur_instance = hci_find_adv_instance(hdev, instance);
2665         if (!cur_instance)
2666                 return NULL;
2667
2668         if (cur_instance == list_last_entry(&hdev->adv_instances,
2669                                             struct adv_info, list))
2670                 return list_first_entry(&hdev->adv_instances,
2671                                                  struct adv_info, list);
2672         else
2673                 return list_next_entry(cur_instance, list);
2674 }
2675
2676 /* This function requires the caller holds hdev->lock */
2677 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2678 {
2679         struct adv_info *adv_instance;
2680
2681         adv_instance = hci_find_adv_instance(hdev, instance);
2682         if (!adv_instance)
2683                 return -ENOENT;
2684
2685         BT_DBG("%s removing instance %d", hdev->name, instance);
2686
2687         if (hdev->cur_adv_instance == instance) {
2688                 if (hdev->adv_instance_timeout) {
2689                         cancel_delayed_work(&hdev->adv_instance_expire);
2690                         hdev->adv_instance_timeout = 0;
2691                 }
2692                 hdev->cur_adv_instance = 0x00;
2693         }
2694
2695         list_del(&adv_instance->list);
2696         kfree(adv_instance);
2697
2698         hdev->adv_instance_cnt--;
2699
2700         return 0;
2701 }
2702
2703 /* This function requires the caller holds hdev->lock */
2704 void hci_adv_instances_clear(struct hci_dev *hdev)
2705 {
2706         struct adv_info *adv_instance, *n;
2707
2708         if (hdev->adv_instance_timeout) {
2709                 cancel_delayed_work(&hdev->adv_instance_expire);
2710                 hdev->adv_instance_timeout = 0;
2711         }
2712
2713         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2714                 list_del(&adv_instance->list);
2715                 kfree(adv_instance);
2716         }
2717
2718         hdev->adv_instance_cnt = 0;
2719         hdev->cur_adv_instance = 0x00;
2720 }
2721
2722 /* This function requires the caller holds hdev->lock */
2723 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2724                          u16 adv_data_len, u8 *adv_data,
2725                          u16 scan_rsp_len, u8 *scan_rsp_data,
2726                          u16 timeout, u16 duration)
2727 {
2728         struct adv_info *adv_instance;
2729
2730         adv_instance = hci_find_adv_instance(hdev, instance);
2731         if (adv_instance) {
2732                 memset(adv_instance->adv_data, 0,
2733                        sizeof(adv_instance->adv_data));
2734                 memset(adv_instance->scan_rsp_data, 0,
2735                        sizeof(adv_instance->scan_rsp_data));
2736         } else {
2737                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2738                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2739                         return -EOVERFLOW;
2740
2741                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2742                 if (!adv_instance)
2743                         return -ENOMEM;
2744
2745                 adv_instance->pending = true;
2746                 adv_instance->instance = instance;
2747                 list_add(&adv_instance->list, &hdev->adv_instances);
2748                 hdev->adv_instance_cnt++;
2749         }
2750
2751         adv_instance->flags = flags;
2752         adv_instance->adv_data_len = adv_data_len;
2753         adv_instance->scan_rsp_len = scan_rsp_len;
2754
2755         if (adv_data_len)
2756                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2757
2758         if (scan_rsp_len)
2759                 memcpy(adv_instance->scan_rsp_data,
2760                        scan_rsp_data, scan_rsp_len);
2761
2762         adv_instance->timeout = timeout;
2763         adv_instance->remaining_time = timeout;
2764
2765         if (duration == 0)
2766                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2767         else
2768                 adv_instance->duration = duration;
2769
2770         BT_DBG("%s for instance %d", hdev->name, instance);
2771
2772         return 0;
2773 }
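
/* Illustrative sketch, not part of this file: registering a one-byte
 * Flags advertising payload as instance 1, with no scan response, no
 * timeout and the default duration, could look like:
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };  // len, type (Flags), LE General
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad,
 *				   0, NULL, 0, 0);
 */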
2774
2775 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2776                                          bdaddr_t *bdaddr, u8 type)
2777 {
2778         struct bdaddr_list *b;
2779
2780         list_for_each_entry(b, bdaddr_list, list) {
2781                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2782                         return b;
2783         }
2784
2785         return NULL;
2786 }
2787
2788 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2789 {
2790         struct bdaddr_list *b, *n;
2791
2792         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2793                 list_del(&b->list);
2794                 kfree(b);
2795         }
2796 }
2797
2798 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2799 {
2800         struct bdaddr_list *entry;
2801
2802         if (!bacmp(bdaddr, BDADDR_ANY))
2803                 return -EBADF;
2804
2805         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2806                 return -EEXIST;
2807
2808         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2809         if (!entry)
2810                 return -ENOMEM;
2811
2812         bacpy(&entry->bdaddr, bdaddr);
2813         entry->bdaddr_type = type;
2814
2815         list_add(&entry->list, list);
2816
2817         return 0;
2818 }
2819
2820 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2821 {
2822         struct bdaddr_list *entry;
2823
2824         if (!bacmp(bdaddr, BDADDR_ANY)) {
2825                 hci_bdaddr_list_clear(list);
2826                 return 0;
2827         }
2828
2829         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2830         if (!entry)
2831                 return -ENOENT;
2832
2833         list_del(&entry->list);
2834         kfree(entry);
2835
2836         return 0;
2837 }
2838
2839 /* This function requires the caller holds hdev->lock */
2840 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2841                                                bdaddr_t *addr, u8 addr_type)
2842 {
2843         struct hci_conn_params *params;
2844
2845         list_for_each_entry(params, &hdev->le_conn_params, list) {
2846                 if (bacmp(&params->addr, addr) == 0 &&
2847                     params->addr_type == addr_type) {
2848                         return params;
2849                 }
2850         }
2851
2852         return NULL;
2853 }
2854
2855 /* This function requires the caller holds hdev->lock */
2856 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2857                                                   bdaddr_t *addr, u8 addr_type)
2858 {
2859         struct hci_conn_params *param;
2860
2861         list_for_each_entry(param, list, action) {
2862                 if (bacmp(&param->addr, addr) == 0 &&
2863                     param->addr_type == addr_type)
2864                         return param;
2865         }
2866
2867         return NULL;
2868 }
2869
2870 /* This function requires the caller holds hdev->lock */
2871 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2872                                             bdaddr_t *addr, u8 addr_type)
2873 {
2874         struct hci_conn_params *params;
2875
2876         params = hci_conn_params_lookup(hdev, addr, addr_type);
2877         if (params)
2878                 return params;
2879
2880         params = kzalloc(sizeof(*params), GFP_KERNEL);
2881         if (!params) {
2882                 BT_ERR("Out of memory");
2883                 return NULL;
2884         }
2885
2886         bacpy(&params->addr, addr);
2887         params->addr_type = addr_type;
2888
2889         list_add(&params->list, &hdev->le_conn_params);
2890         INIT_LIST_HEAD(&params->action);
2891
2892         params->conn_min_interval = hdev->le_conn_min_interval;
2893         params->conn_max_interval = hdev->le_conn_max_interval;
2894         params->conn_latency = hdev->le_conn_latency;
2895         params->supervision_timeout = hdev->le_supv_timeout;
2896         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2897
2898         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2899
2900         return params;
2901 }
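
/* As an illustration, the mgmt Add Device command with action 0x02
 * (auto-connect) ends up here via hci_conn_params_set(), which then
 * flips auto_connect from HCI_AUTO_CONN_DISABLED to
 * HCI_AUTO_CONN_ALWAYS and links the entry onto the pend_le_conns
 * action list.
 */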
2902
2903 static void hci_conn_params_free(struct hci_conn_params *params)
2904 {
2905         if (params->conn) {
2906                 hci_conn_drop(params->conn);
2907                 hci_conn_put(params->conn);
2908         }
2909
2910         list_del(&params->action);
2911         list_del(&params->list);
2912         kfree(params);
2913 }
2914
2915 /* This function requires the caller holds hdev->lock */
2916 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2917 {
2918         struct hci_conn_params *params;
2919
2920         params = hci_conn_params_lookup(hdev, addr, addr_type);
2921         if (!params)
2922                 return;
2923
2924         hci_conn_params_free(params);
2925
2926         hci_update_background_scan(hdev);
2927
2928         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2929 }
2930
2931 /* This function requires the caller holds hdev->lock */
2932 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2933 {
2934         struct hci_conn_params *params, *tmp;
2935
2936         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2937                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2938                         continue;
2939
2940                 /* If trying to establish a one-time connection to a disabled
2941                  * device, leave the params, but mark them as one-time only.
2942                  */
2943                 if (params->explicit_connect) {
2944                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2945                         continue;
2946                 }
2947
2948                 list_del(&params->list);
2949                 kfree(params);
2950         }
2951
2952         BT_DBG("All LE disabled connection parameters were removed");
2953 }
2954
2955 /* This function requires the caller holds hdev->lock */
2956 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2957 {
2958         struct hci_conn_params *params, *tmp;
2959
2960         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2961                 hci_conn_params_free(params);
2962
2963         BT_DBG("All LE connection parameters were removed");
2964 }
2965
2966 /* Copy the Identity Address of the controller.
2967  *
2968  * If the controller has a public BD_ADDR, then by default use that one.
2969  * If this is a LE only controller without a public address, default to
2970  * the static random address.
2971  *
2972  * For debugging purposes it is possible to force controllers with a
2973  * public address to use the static random address instead.
2974  *
2975  * In case BR/EDR has been disabled on a dual-mode controller and
2976  * userspace has configured a static address, then that address
2977  * becomes the identity address instead of the public BR/EDR address.
2978  */
2979 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2980                                u8 *bdaddr_type)
2981 {
2982         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2983             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2984             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2985              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2986                 bacpy(bdaddr, &hdev->static_addr);
2987                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2988         } else {
2989                 bacpy(bdaddr, &hdev->bdaddr);
2990                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2991         }
2992 }
2993
2994 /* Alloc HCI device */
2995 struct hci_dev *hci_alloc_dev(void)
2996 {
2997         struct hci_dev *hdev;
2998
2999         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3000         if (!hdev)
3001                 return NULL;
3002
3003         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3004         hdev->esco_type = (ESCO_HV1);
3005         hdev->link_mode = (HCI_LM_ACCEPT);
3006         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3007         hdev->io_capability = 0x03;     /* No Input No Output */
3008         hdev->manufacturer = 0xffff;    /* Default to internal use */
3009         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3010         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3011         hdev->adv_instance_cnt = 0;
3012         hdev->cur_adv_instance = 0x00;
3013         hdev->adv_instance_timeout = 0;
3014
3015         hdev->sniff_max_interval = 800;
3016         hdev->sniff_min_interval = 80;
3017
3018         hdev->le_adv_channel_map = 0x07;
3019         hdev->le_adv_min_interval = 0x0800;
3020         hdev->le_adv_max_interval = 0x0800;
3021         hdev->le_scan_interval = 0x0060;
3022         hdev->le_scan_window = 0x0030;
3023         hdev->le_conn_min_interval = 0x0018;
3024         hdev->le_conn_max_interval = 0x0028;
3025         hdev->le_conn_latency = 0x0000;
3026         hdev->le_supv_timeout = 0x002a;
3027         hdev->le_def_tx_len = 0x001b;
3028         hdev->le_def_tx_time = 0x0148;
3029         hdev->le_max_tx_len = 0x001b;
3030         hdev->le_max_tx_time = 0x0148;
3031         hdev->le_max_rx_len = 0x001b;
3032         hdev->le_max_rx_time = 0x0148;
3033
3034         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3035         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3036         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3037         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3038
3039         mutex_init(&hdev->lock);
3040         mutex_init(&hdev->req_lock);
3041
3042         INIT_LIST_HEAD(&hdev->mgmt_pending);
3043         INIT_LIST_HEAD(&hdev->blacklist);
3044         INIT_LIST_HEAD(&hdev->whitelist);
3045         INIT_LIST_HEAD(&hdev->uuids);
3046         INIT_LIST_HEAD(&hdev->link_keys);
3047         INIT_LIST_HEAD(&hdev->long_term_keys);
3048         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3049         INIT_LIST_HEAD(&hdev->remote_oob_data);
3050         INIT_LIST_HEAD(&hdev->le_white_list);
3051         INIT_LIST_HEAD(&hdev->le_conn_params);
3052         INIT_LIST_HEAD(&hdev->pend_le_conns);
3053         INIT_LIST_HEAD(&hdev->pend_le_reports);
3054         INIT_LIST_HEAD(&hdev->conn_hash.list);
3055         INIT_LIST_HEAD(&hdev->adv_instances);
3056
3057         INIT_WORK(&hdev->rx_work, hci_rx_work);
3058         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3059         INIT_WORK(&hdev->tx_work, hci_tx_work);
3060         INIT_WORK(&hdev->power_on, hci_power_on);
3061         INIT_WORK(&hdev->error_reset, hci_error_reset);
3062
3063         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3064
3065         skb_queue_head_init(&hdev->rx_q);
3066         skb_queue_head_init(&hdev->cmd_q);
3067         skb_queue_head_init(&hdev->raw_q);
3068
3069         init_waitqueue_head(&hdev->req_wait_q);
3070
3071         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3072
3073         hci_request_setup(hdev);
3074
3075         hci_init_sysfs(hdev);
3076         discovery_init(hdev);
3077
3078         return hdev;
3079 }
3080 EXPORT_SYMBOL(hci_alloc_dev);
3081
3082 /* Free HCI device */
3083 void hci_free_dev(struct hci_dev *hdev)
3084 {
3085         /* will free via device release */
3086         put_device(&hdev->dev);
3087 }
3088 EXPORT_SYMBOL(hci_free_dev);
3089
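/* Illustrative sketch, not part of this file: a minimal transport
 * driver pairs hci_alloc_dev() with hci_register_dev() and must
 * provide at least the open/close/send callbacks checked below.
 * foo_open, foo_close and foo_send are hypothetical driver functions:
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_VIRTUAL;
 *	hdev->dev_type = HCI_PRIMARY;
 *	hdev->open     = foo_open;
 *	hdev->close    = foo_close;
 *	hdev->send     = foo_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 */
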
3090 /* Register HCI device */
3091 int hci_register_dev(struct hci_dev *hdev)
3092 {
3093         int id, error;
3094
3095         if (!hdev->open || !hdev->close || !hdev->send)
3096                 return -EINVAL;
3097
3098         /* Do not allow HCI_AMP devices to register at index 0,
3099          * so the index can be used as the AMP controller ID.
3100          */
3101         switch (hdev->dev_type) {
3102         case HCI_PRIMARY:
3103                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3104                 break;
3105         case HCI_AMP:
3106                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3107                 break;
3108         default:
3109                 return -EINVAL;
3110         }
3111
3112         if (id < 0)
3113                 return id;
3114
3115         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3116         hdev->id = id;
3117
3118         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3119
3120         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3121         if (!hdev->workqueue) {
3122                 error = -ENOMEM;
3123                 goto err;
3124         }
3125
3126         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3127                                                       hdev->name);
3128         if (!hdev->req_workqueue) {
3129                 destroy_workqueue(hdev->workqueue);
3130                 error = -ENOMEM;
3131                 goto err;
3132         }
3133
3134         if (!IS_ERR_OR_NULL(bt_debugfs))
3135                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3136
3137         dev_set_name(&hdev->dev, "%s", hdev->name);
3138
3139         error = device_add(&hdev->dev);
3140         if (error < 0)
3141                 goto err_wqueue;
3142
3143         hci_leds_init(hdev);
3144
3145         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3146                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3147                                     hdev);
3148         if (hdev->rfkill) {
3149                 if (rfkill_register(hdev->rfkill) < 0) {
3150                         rfkill_destroy(hdev->rfkill);
3151                         hdev->rfkill = NULL;
3152                 }
3153         }
3154
3155         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3156                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3157
3158         hci_dev_set_flag(hdev, HCI_SETUP);
3159         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3160
3161         if (hdev->dev_type == HCI_PRIMARY) {
3162                 /* Assume BR/EDR support until proven otherwise (such as
3163          * through reading supported features during init).
3164                  */
3165                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3166         }
3167
3168         write_lock(&hci_dev_list_lock);
3169         list_add(&hdev->list, &hci_dev_list);
3170         write_unlock(&hci_dev_list_lock);
3171
3172         /* Devices that are marked for raw-only usage are unconfigured
3173          * and should not be included in normal operation.
3174          */
3175         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3176                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3177
3178         hci_sock_dev_event(hdev, HCI_DEV_REG);
3179         hci_dev_hold(hdev);
3180
3181         queue_work(hdev->req_workqueue, &hdev->power_on);
3182
3183         return id;
3184
3185 err_wqueue:
3186         debugfs_remove_recursive(hdev->debugfs);
3187         destroy_workqueue(hdev->workqueue);
3188         destroy_workqueue(hdev->req_workqueue);
3189 err:
3190         ida_simple_remove(&hci_index_ida, hdev->id);
3191
3192         return error;
3193 }
3194 EXPORT_SYMBOL(hci_register_dev);
3195
3196 /* Unregister HCI device */
3197 void hci_unregister_dev(struct hci_dev *hdev)
3198 {
3199         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3200
3201         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3202
3203         write_lock(&hci_dev_list_lock);
3204         list_del(&hdev->list);
3205         write_unlock(&hci_dev_list_lock);
3206
3207         cancel_work_sync(&hdev->power_on);
3208
3209         hci_dev_do_close(hdev);
3210
3211         if (!test_bit(HCI_INIT, &hdev->flags) &&
3212             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3213             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3214                 hci_dev_lock(hdev);
3215                 mgmt_index_removed(hdev);
3216                 hci_dev_unlock(hdev);
3217         }
3218
3219         /* mgmt_index_removed should take care of emptying the
3220          * pending list */
3221         BUG_ON(!list_empty(&hdev->mgmt_pending));
3222
3223         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3224
3225         if (hdev->rfkill) {
3226                 rfkill_unregister(hdev->rfkill);
3227                 rfkill_destroy(hdev->rfkill);
3228         }
3229
3230         device_del(&hdev->dev);
3231         /* Actual cleanup is deferred until hci_cleanup_dev(). */
3232         hci_dev_put(hdev);
3233 }
3234 EXPORT_SYMBOL(hci_unregister_dev);
3235
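/* Note that teardown is two-stage: hci_unregister_dev() detaches the
 * device, but the memory stays alive until the final hci_dev_put();
 * the device release path then calls hci_cleanup_dev() below.
 */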
3236 /* Cleanup HCI device */
3237 void hci_cleanup_dev(struct hci_dev *hdev)
3238 {
3239         debugfs_remove_recursive(hdev->debugfs);
3240         kfree_const(hdev->hw_info);
3241         kfree_const(hdev->fw_info);
3242
3243         destroy_workqueue(hdev->workqueue);
3244         destroy_workqueue(hdev->req_workqueue);
3245
3246         hci_dev_lock(hdev);
3247         hci_bdaddr_list_clear(&hdev->blacklist);
3248         hci_bdaddr_list_clear(&hdev->whitelist);
3249         hci_uuids_clear(hdev);
3250         hci_link_keys_clear(hdev);
3251         hci_smp_ltks_clear(hdev);
3252         hci_smp_irks_clear(hdev);
3253         hci_remote_oob_data_clear(hdev);
3254         hci_adv_instances_clear(hdev);
3255         hci_bdaddr_list_clear(&hdev->le_white_list);
3256         hci_conn_params_clear_all(hdev);
3257         hci_discovery_filter_clear(hdev);
3258         hci_dev_unlock(hdev);
3259
3260         ida_simple_remove(&hci_index_ida, hdev->id);
3261 }
3262
3263 /* Suspend HCI device */
3264 int hci_suspend_dev(struct hci_dev *hdev)
3265 {
3266         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3267         return 0;
3268 }
3269 EXPORT_SYMBOL(hci_suspend_dev);
3270
3271 /* Resume HCI device */
3272 int hci_resume_dev(struct hci_dev *hdev)
3273 {
3274         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3275         return 0;
3276 }
3277 EXPORT_SYMBOL(hci_resume_dev);
3278
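/* Note: rather than touching the hardware directly, this injects a
 * synthetic HCI Hardware Error event into the RX path; its event
 * handler then schedules the error_reset work to restart the device.
 */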
3279 /* Reset HCI device */
3280 int hci_reset_dev(struct hci_dev *hdev)
3281 {
3282         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3283         struct sk_buff *skb;
3284
3285         skb = bt_skb_alloc(3, GFP_ATOMIC);
3286         if (!skb)
3287                 return -ENOMEM;
3288
3289         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3290         skb_put_data(skb, hw_err, 3);
3291
3292         /* Send Hardware Error to upper stack */
3293         return hci_recv_frame(hdev, skb);
3294 }
3295 EXPORT_SYMBOL(hci_reset_dev);
3296
3297 /* Receive frame from HCI drivers */
3298 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3299 {
3300         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3301                       && !test_bit(HCI_INIT, &hdev->flags))) {
3302                 kfree_skb(skb);
3303                 return -ENXIO;
3304         }
3305
3306         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3307             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3308             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3309                 kfree_skb(skb);
3310                 return -EINVAL;
3311         }
3312
3313         /* Incoming skb */
3314         bt_cb(skb)->incoming = 1;
3315
3316         /* Time stamp */
3317         __net_timestamp(skb);
3318
3319         skb_queue_tail(&hdev->rx_q, skb);
3320         queue_work(hdev->workqueue, &hdev->rx_work);
3321
3322         return 0;
3323 }
3324 EXPORT_SYMBOL(hci_recv_frame);
3325
3326 /* Receive diagnostic message from HCI drivers */
3327 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3328 {
3329         /* Mark as diagnostic packet */
3330         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3331
3332         /* Time stamp */
3333         __net_timestamp(skb);
3334
3335         skb_queue_tail(&hdev->rx_q, skb);
3336         queue_work(hdev->workqueue, &hdev->rx_work);
3337
3338         return 0;
3339 }
3340 EXPORT_SYMBOL(hci_recv_diag);
3341
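/* Record hardware/firmware version strings for exposure via debugfs.
 * kvasprintf_const() may return the format string itself when no
 * formatting is needed, hence the matching kfree_const() calls here
 * and in hci_cleanup_dev().
 */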
3342 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3343 {
3344         va_list vargs;
3345
3346         va_start(vargs, fmt);
3347         kfree_const(hdev->hw_info);
3348         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3349         va_end(vargs);
3350 }
3351 EXPORT_SYMBOL(hci_set_hw_info);
3352
3353 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3354 {
3355         va_list vargs;
3356
3357         va_start(vargs, fmt);
3358         kfree_const(hdev->fw_info);
3359         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3360         va_end(vargs);
3361 }
3362 EXPORT_SYMBOL(hci_set_fw_info);
3363
3364 /* ---- Interface to upper protocols ---- */
3365
3366 int hci_register_cb(struct hci_cb *cb)
3367 {
3368         BT_DBG("%p name %s", cb, cb->name);
3369
3370         mutex_lock(&hci_cb_list_lock);
3371         list_add_tail(&cb->list, &hci_cb_list);
3372         mutex_unlock(&hci_cb_list_lock);
3373
3374         return 0;
3375 }
3376 EXPORT_SYMBOL(hci_register_cb);
3377
3378 int hci_unregister_cb(struct hci_cb *cb)
3379 {
3380         BT_DBG("%p name %s", cb, cb->name);
3381
3382         mutex_lock(&hci_cb_list_lock);
3383         list_del(&cb->list);
3384         mutex_unlock(&hci_cb_list_lock);
3385
3386         return 0;
3387 }
3388 EXPORT_SYMBOL(hci_unregister_cb);
3389
3390 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3391 {
3392         int err;
3393
3394         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3395                skb->len);
3396
3397         /* Time stamp */
3398         __net_timestamp(skb);
3399
3400         /* Send copy to monitor */
3401         hci_send_to_monitor(hdev, skb);
3402
3403         if (atomic_read(&hdev->promisc)) {
3404                 /* Send copy to the sockets */
3405                 hci_send_to_sock(hdev, skb);
3406         }
3407
3408         /* Get rid of skb owner, prior to sending to the driver. */
3409         skb_orphan(skb);
3410
3411         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3412                 kfree_skb(skb);
3413                 return;
3414         }
3415
3416         err = hdev->send(hdev, skb);
3417         if (err < 0) {
3418                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3419                 kfree_skb(skb);
3420         }
3421 }
3422
3423 /* Send HCI command */
3424 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3425                  const void *param)
3426 {
3427         struct sk_buff *skb;
3428
3429         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3430
3431         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3432         if (!skb) {
3433                 BT_ERR("%s no memory for command", hdev->name);
3434                 return -ENOMEM;
3435         }
3436
3437         /* Stand-alone HCI commands must be flagged as
3438          * single-command requests.
3439          */
3440         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3441
3442         skb_queue_tail(&hdev->cmd_q, skb);
3443         queue_work(hdev->workqueue, &hdev->cmd_work);
3444
3445         return 0;
3446 }
3447
3448 /* Get data from the previously sent command */
3449 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3450 {
3451         struct hci_command_hdr *hdr;
3452
3453         if (!hdev->sent_cmd)
3454                 return NULL;
3455
3456         hdr = (void *) hdev->sent_cmd->data;
3457
3458         if (hdr->opcode != cpu_to_le16(opcode))
3459                 return NULL;
3460
3461         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3462
3463         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3464 }
3465
3466 /* Send HCI command and wait for command complete event */
3467 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3468                              const void *param, u32 timeout)
3469 {
3470         struct sk_buff *skb;
3471
3472         if (!test_bit(HCI_UP, &hdev->flags))
3473                 return ERR_PTR(-ENETDOWN);
3474
3475         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3476
3477         hci_req_sync_lock(hdev);
3478         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3479         hci_req_sync_unlock(hdev);
3480
3481         return skb;
3482 }
3483 EXPORT_SYMBOL(hci_cmd_sync);
3484
3485 /* Send ACL data */
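/* Prepend the 4-byte ACL header: a 12-bit connection handle with the
 * packet boundary/broadcast flags packed into the upper bits, followed
 * by the payload length.
 */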
3486 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3487 {
3488         struct hci_acl_hdr *hdr;
3489         int len = skb->len;
3490
3491         skb_push(skb, HCI_ACL_HDR_SIZE);
3492         skb_reset_transport_header(skb);
3493         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3494         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3495         hdr->dlen   = cpu_to_le16(len);
3496 }
3497
3498 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3499                           struct sk_buff *skb, __u16 flags)
3500 {
3501         struct hci_conn *conn = chan->conn;
3502         struct hci_dev *hdev = conn->hdev;
3503         struct sk_buff *list;
3504
3505         skb->len = skb_headlen(skb);
3506         skb->data_len = 0;
3507
3508         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3509
3510         switch (hdev->dev_type) {
3511         case HCI_PRIMARY:
3512                 hci_add_acl_hdr(skb, conn->handle, flags);
3513                 break;
3514         case HCI_AMP:
3515                 hci_add_acl_hdr(skb, chan->handle, flags);
3516                 break;
3517         default:
3518                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3519                 return;
3520         }
3521
3522         list = skb_shinfo(skb)->frag_list;
3523         if (!list) {
3524                 /* Non-fragmented */
3525                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3526
3527                 skb_queue_tail(queue, skb);
3528         } else {
3529                 /* Fragmented */
3530                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3531
3532                 skb_shinfo(skb)->frag_list = NULL;
3533
3534                 /* Queue all fragments atomically. We need to use spin_lock_bh
3535                  * here because of 6LoWPAN links, where this function is
3536                  * called from softirq context and taking a normal spin
3537                  * lock could cause deadlocks.
3538                  */
3539                 spin_lock_bh(&queue->lock);
3540
3541                 __skb_queue_tail(queue, skb);
3542
3543                 flags &= ~ACL_START;
3544                 flags |= ACL_CONT;
3545                 do {
3546                         skb = list; list = list->next;
3547
3548                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3549                         hci_add_acl_hdr(skb, conn->handle, flags);
3550
3551                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3552
3553                         __skb_queue_tail(queue, skb);
3554                 } while (list);
3555
3556                 spin_unlock_bh(&queue->lock);
3557         }
3558 }
3559
3560 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3561 {
3562         struct hci_dev *hdev = chan->conn->hdev;
3563
3564         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3565
3566         hci_queue_acl(chan, &chan->data_q, skb, flags);
3567
3568         queue_work(hdev->workqueue, &hdev->tx_work);
3569 }
3570
3571 /* Send SCO data */
3572 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3573 {
3574         struct hci_dev *hdev = conn->hdev;
3575         struct hci_sco_hdr hdr;
3576
3577         BT_DBG("%s len %d", hdev->name, skb->len);
3578
3579         hdr.handle = cpu_to_le16(conn->handle);
3580         hdr.dlen   = skb->len;
3581
3582         skb_push(skb, HCI_SCO_HDR_SIZE);
3583         skb_reset_transport_header(skb);
3584         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3585
3586         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3587
3588         skb_queue_tail(&conn->data_q, skb);
3589         queue_work(hdev->workqueue, &hdev->tx_work);
3590 }
3591
3592 /* ---- HCI TX task (outgoing data) ---- */
3593
3594 /* HCI Connection scheduler */
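/* Pick the connection of the given link type with the least amount of
 * outstanding data (lowest c->sent) and grant it a fair share of the
 * free controller buffers: *quote = credits / active connections,
 * with a minimum of one.
 */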
3595 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3596                                      int *quote)
3597 {
3598         struct hci_conn_hash *h = &hdev->conn_hash;
3599         struct hci_conn *conn = NULL, *c;
3600         unsigned int num = 0, min = ~0;
3601
3602         /* We don't have to lock the device here. Connections are always
3603          * added and removed with the TX task disabled. */
3604
3605         rcu_read_lock();
3606
3607         list_for_each_entry_rcu(c, &h->list, list) {
3608                 if (c->type != type || skb_queue_empty(&c->data_q))
3609                         continue;
3610
3611                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3612                         continue;
3613
3614                 num++;
3615
3616                 if (c->sent < min) {
3617                         min  = c->sent;
3618                         conn = c;
3619                 }
3620
3621                 if (hci_conn_num(hdev, type) == num)
3622                         break;
3623         }
3624
3625         rcu_read_unlock();
3626
3627         if (conn) {
3628                 int cnt, q;
3629
3630                 switch (conn->type) {
3631                 case ACL_LINK:
3632                         cnt = hdev->acl_cnt;
3633                         break;
3634                 case SCO_LINK:
3635                 case ESCO_LINK:
3636                         cnt = hdev->sco_cnt;
3637                         break;
3638                 case LE_LINK:
3639                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3640                         break;
3641                 default:
3642                         cnt = 0;
3643                         BT_ERR("Unknown link type");
3644                 }
3645
3646                 q = cnt / num;
3647                 *quote = q ? q : 1;
3648         } else
3649                 *quote = 0;
3650
3651         BT_DBG("conn %p quote %d", conn, *quote);
3652         return conn;
3653 }
3654
3655 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3656 {
3657         struct hci_conn_hash *h = &hdev->conn_hash;
3658         struct hci_conn *c;
3659
3660         BT_ERR("%s link tx timeout", hdev->name);
3661
3662         rcu_read_lock();
3663
3664         /* Kill stalled connections */
3665         list_for_each_entry_rcu(c, &h->list, list) {
3666                 if (c->type == type && c->sent) {
3667                         BT_ERR("%s killing stalled connection %pMR",
3668                                hdev->name, &c->dst);
3669                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3670                 }
3671         }
3672
3673         rcu_read_unlock();
3674 }
3675
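/* Channel-level counterpart of hci_low_sent(): walk the HCI channels of
 * every connection of the given type, consider only the highest skb
 * priority currently queued, and among those pick the channel on the
 * least busy connection (lowest conn->sent). Returns the channel and
 * its buffer quota.
 */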
3676 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3677                                       int *quote)
3678 {
3679         struct hci_conn_hash *h = &hdev->conn_hash;
3680         struct hci_chan *chan = NULL;
3681         unsigned int num = 0, min = ~0, cur_prio = 0;
3682         struct hci_conn *conn;
3683         int cnt, q, conn_num = 0;
3684
3685         BT_DBG("%s", hdev->name);
3686
3687         rcu_read_lock();
3688
3689         list_for_each_entry_rcu(conn, &h->list, list) {
3690                 struct hci_chan *tmp;
3691
3692                 if (conn->type != type)
3693                         continue;
3694
3695                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3696                         continue;
3697
3698                 conn_num++;
3699
3700                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3701                         struct sk_buff *skb;
3702
3703                         if (skb_queue_empty(&tmp->data_q))
3704                                 continue;
3705
3706                         skb = skb_peek(&tmp->data_q);
3707                         if (skb->priority < cur_prio)
3708                                 continue;
3709
3710                         if (skb->priority > cur_prio) {
3711                                 num = 0;
3712                                 min = ~0;
3713                                 cur_prio = skb->priority;
3714                         }
3715
3716                         num++;
3717
3718                         if (conn->sent < min) {
3719                                 min  = conn->sent;
3720                                 chan = tmp;
3721                         }
3722                 }
3723
3724                 if (hci_conn_num(hdev, type) == conn_num)
3725                         break;
3726         }
3727
3728         rcu_read_unlock();
3729
3730         if (!chan)
3731                 return NULL;
3732
3733         switch (chan->conn->type) {
3734         case ACL_LINK:
3735                 cnt = hdev->acl_cnt;
3736                 break;
3737         case AMP_LINK:
3738                 cnt = hdev->block_cnt;
3739                 break;
3740         case SCO_LINK:
3741         case ESCO_LINK:
3742                 cnt = hdev->sco_cnt;
3743                 break;
3744         case LE_LINK:
3745                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3746                 break;
3747         default:
3748                 cnt = 0;
3749                 BT_ERR("Unknown link type");
3750         }
3751
3752         q = cnt / num;
3753         *quote = q ? q : 1;
3754         BT_DBG("chan %p quote %d", chan, *quote);
3755         return chan;
3756 }
3757
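/* Anti-starvation pass, run after a TX round: any channel that did not
 * get to send (chan->sent == 0) has the skb at the head of its queue
 * promoted to HCI_PRIO_MAX - 1 so the next round will pick it up.
 */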
3758 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3759 {
3760         struct hci_conn_hash *h = &hdev->conn_hash;
3761         struct hci_conn *conn;
3762         int num = 0;
3763
3764         BT_DBG("%s", hdev->name);
3765
3766         rcu_read_lock();
3767
3768         list_for_each_entry_rcu(conn, &h->list, list) {
3769                 struct hci_chan *chan;
3770
3771                 if (conn->type != type)
3772                         continue;
3773
3774                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3775                         continue;
3776
3777                 num++;
3778
3779                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3780                         struct sk_buff *skb;
3781
3782                         if (chan->sent) {
3783                                 chan->sent = 0;
3784                                 continue;
3785                         }
3786
3787                         if (skb_queue_empty(&chan->data_q))
3788                                 continue;
3789
3790                         skb = skb_peek(&chan->data_q);
3791                         if (skb->priority >= HCI_PRIO_MAX - 1)
3792                                 continue;
3793
3794                         skb->priority = HCI_PRIO_MAX - 1;
3795
3796                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3797                                skb->priority);
3798                 }
3799
3800                 if (hci_conn_num(hdev, type) == num)
3801                         break;
3802         }
3803
3804         rcu_read_unlock();
3805
3806 }
3807
3808 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3809 {
3810         /* Calculate count of blocks used by this packet */
3811         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3812 }
3813
3814 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3815 {
3816         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3817                 /* ACL tx timeout must be longer than the maximum
3818                  * link supervision timeout (40.9 seconds) */
3819                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3820                                        HCI_ACL_TX_TIMEOUT))
3821                         hci_link_tx_to(hdev, ACL_LINK);
3822         }
3823 }
3824
3825 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3826 {
3827         unsigned int cnt = hdev->acl_cnt;
3828         struct hci_chan *chan;
3829         struct sk_buff *skb;
3830         int quote;
3831
3832         __check_timeout(hdev, cnt);
3833
3834         while (hdev->acl_cnt &&
3835                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3836                 u32 priority = (skb_peek(&chan->data_q))->priority;
3837                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3838                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3839                                skb->len, skb->priority);
3840
3841                         /* Stop if priority has changed */
3842                         if (skb->priority < priority)
3843                                 break;
3844
3845                         skb = skb_dequeue(&chan->data_q);
3846
3847                         hci_conn_enter_active_mode(chan->conn,
3848                                                    bt_cb(skb)->force_active);
3849
3850                         hci_send_frame(hdev, skb);
3851                         hdev->acl_last_tx = jiffies;
3852
3853                         hdev->acl_cnt--;
3854                         chan->sent++;
3855                         chan->conn->sent++;
3856                 }
3857         }
3858
3859         if (cnt != hdev->acl_cnt)
3860                 hci_prio_recalculate(hdev, ACL_LINK);
3861 }
3862
3863 static void hci_sched_acl_blk(struct hci_dev *hdev)
3864 {
3865         unsigned int cnt = hdev->block_cnt;
3866         struct hci_chan *chan;
3867         struct sk_buff *skb;
3868         int quote;
3869         u8 type;
3870
3871         __check_timeout(hdev, cnt);
3872
3873         BT_DBG("%s", hdev->name);
3874
3875         if (hdev->dev_type == HCI_AMP)
3876                 type = AMP_LINK;
3877         else
3878                 type = ACL_LINK;
3879
3880         while (hdev->block_cnt > 0 &&
3881                (chan = hci_chan_sent(hdev, type, &quote))) {
3882                 u32 priority = (skb_peek(&chan->data_q))->priority;
3883                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3884                         int blocks;
3885
3886                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3887                                skb->len, skb->priority);
3888
3889                         /* Stop if priority has changed */
3890                         if (skb->priority < priority)
3891                                 break;
3892
3893                         skb = skb_dequeue(&chan->data_q);
3894
3895                         blocks = __get_blocks(hdev, skb);
3896                         if (blocks > hdev->block_cnt)
3897                                 return;
3898
3899                         hci_conn_enter_active_mode(chan->conn,
3900                                                    bt_cb(skb)->force_active);
3901
3902                         hci_send_frame(hdev, skb);
3903                         hdev->acl_last_tx = jiffies;
3904
3905                         hdev->block_cnt -= blocks;
3906                         quote -= blocks;
3907
3908                         chan->sent += blocks;
3909                         chan->conn->sent += blocks;
3910                 }
3911         }
3912
3913         if (cnt != hdev->block_cnt)
3914                 hci_prio_recalculate(hdev, type);
3915 }
3916
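/* Dispatch to whichever flow control model the controller uses: classic
 * packet-based accounting (one credit per ACL packet) or the
 * block-based mode of AMP controllers (credits in units of
 * hdev->block_len bytes).
 */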
3917 static void hci_sched_acl(struct hci_dev *hdev)
3918 {
3919         BT_DBG("%s", hdev->name);
3920
3921         /* No ACL link over BR/EDR controller */
3922         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3923                 return;
3924
3925         /* No AMP link over AMP controller */
3926         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3927                 return;
3928
3929         switch (hdev->flow_ctl_mode) {
3930         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3931                 hci_sched_acl_pkt(hdev);
3932                 break;
3933
3934         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3935                 hci_sched_acl_blk(hdev);
3936                 break;
3937         }
3938 }
3939
3940 /* Schedule SCO */
3941 static void hci_sched_sco(struct hci_dev *hdev)
3942 {
3943         struct hci_conn *conn;
3944         struct sk_buff *skb;
3945         int quote;
3946
3947         BT_DBG("%s", hdev->name);
3948
3949         if (!hci_conn_num(hdev, SCO_LINK))
3950                 return;
3951
3952         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3953                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3954                         BT_DBG("skb %p len %d", skb, skb->len);
3955                         hci_send_frame(hdev, skb);
3956
3957                         conn->sent++;
3958                         if (conn->sent == ~0)
3959                                 conn->sent = 0;
3960                 }
3961         }
3962 }
3963
3964 static void hci_sched_esco(struct hci_dev *hdev)
3965 {
3966         struct hci_conn *conn;
3967         struct sk_buff *skb;
3968         int quote;
3969
3970         BT_DBG("%s", hdev->name);
3971
3972         if (!hci_conn_num(hdev, ESCO_LINK))
3973                 return;
3974
3975         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3976                                                      &quote))) {
3977                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3978                         BT_DBG("skb %p len %d", skb, skb->len);
3979                         hci_send_frame(hdev, skb);
3980
3981                         conn->sent++;
3982                         if (conn->sent == ~0)
3983                                 conn->sent = 0;
3984                 }
3985         }
3986 }
3987
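/* LE scheduler. Controllers without a dedicated LE buffer pool report
 * le_pkts == 0, in which case LE traffic shares the ACL credits; note
 * the write-back of the remaining count below.
 */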
3988 static void hci_sched_le(struct hci_dev *hdev)
3989 {
3990         struct hci_chan *chan;
3991         struct sk_buff *skb;
3992         int quote, cnt, tmp;
3993
3994         BT_DBG("%s", hdev->name);
3995
3996         if (!hci_conn_num(hdev, LE_LINK))
3997                 return;
3998
3999         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4000                 /* LE tx timeout must be longer than the maximum
4001                  * link supervision timeout (40.9 seconds) */
4002                 if (!hdev->le_cnt && hdev->le_pkts &&
4003                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4004                         hci_link_tx_to(hdev, LE_LINK);
4005         }
4006
4007         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4008         tmp = cnt;
4009         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4010                 u32 priority = (skb_peek(&chan->data_q))->priority;
4011                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4012                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4013                                skb->len, skb->priority);
4014
4015                         /* Stop if priority has changed */
4016                         if (skb->priority < priority)
4017                                 break;
4018
4019                         skb = skb_dequeue(&chan->data_q);
4020
4021                         hci_send_frame(hdev, skb);
4022                         hdev->le_last_tx = jiffies;
4023
4024                         cnt--;
4025                         chan->sent++;
4026                         chan->conn->sent++;
4027                 }
4028         }
4029
4030         if (hdev->le_pkts)
4031                 hdev->le_cnt = cnt;
4032         else
4033                 hdev->acl_cnt = cnt;
4034
4035         if (cnt != tmp)
4036                 hci_prio_recalculate(hdev, LE_LINK);
4037 }
4038
4039 static void hci_tx_work(struct work_struct *work)
4040 {
4041         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4042         struct sk_buff *skb;
4043
4044         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4045                hdev->sco_cnt, hdev->le_cnt);
4046
4047         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4048                 /* Schedule queues and send stuff to HCI driver */
4049                 hci_sched_acl(hdev);
4050                 hci_sched_sco(hdev);
4051                 hci_sched_esco(hdev);
4052                 hci_sched_le(hdev);
4053         }
4054
4055         /* Send next queued raw (unknown type) packet */
4056         while ((skb = skb_dequeue(&hdev->raw_q)))
4057                 hci_send_frame(hdev, skb);
4058 }
4059
4060 /* ----- HCI RX task (incoming data processing) ----- */
4061
4062 /* ACL data packet */
4063 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4064 {
4065         struct hci_acl_hdr *hdr = (void *) skb->data;
4066         struct hci_conn *conn;
4067         __u16 handle, flags;
4068
4069         skb_pull(skb, HCI_ACL_HDR_SIZE);
4070
4071         handle = __le16_to_cpu(hdr->handle);
4072         flags  = hci_flags(handle);
4073         handle = hci_handle(handle);
4074
4075         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4076                handle, flags);
4077
4078         hdev->stat.acl_rx++;
4079
4080         hci_dev_lock(hdev);
4081         conn = hci_conn_hash_lookup_handle(hdev, handle);
4082         hci_dev_unlock(hdev);
4083
4084         if (conn) {
4085                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4086
4087                 /* Send to upper protocol */
4088                 l2cap_recv_acldata(conn, skb, flags);
4089                 return;
4090         } else {
4091                 BT_ERR("%s ACL packet for unknown connection handle %d",
4092                        hdev->name, handle);
4093         }
4094
4095         kfree_skb(skb);
4096 }
4097
4098 /* SCO data packet */
4099 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4100 {
4101         struct hci_sco_hdr *hdr = (void *) skb->data;
4102         struct hci_conn *conn;
4103         __u16 handle;
4104
4105         skb_pull(skb, HCI_SCO_HDR_SIZE);
4106
4107         handle = __le16_to_cpu(hdr->handle);
4108
4109         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4110
4111         hdev->stat.sco_rx++;
4112
4113         hci_dev_lock(hdev);
4114         conn = hci_conn_hash_lookup_handle(hdev, handle);
4115         hci_dev_unlock(hdev);
4116
4117         if (conn) {
4118                 /* Send to upper protocol */
4119                 sco_recv_scodata(conn, skb);
4120                 return;
4121         } else {
4122                 BT_ERR("%s SCO packet for unknown connection handle %d",
4123                        hdev->name, handle);
4124         }
4125
4126         kfree_skb(skb);
4127 }
4128
4129 static bool hci_req_is_complete(struct hci_dev *hdev)
4130 {
4131         struct sk_buff *skb;
4132
4133         skb = skb_peek(&hdev->cmd_q);
4134         if (!skb)
4135                 return true;
4136
4137         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4138 }
4139
4140 static void hci_resend_last(struct hci_dev *hdev)
4141 {
4142         struct hci_command_hdr *sent;
4143         struct sk_buff *skb;
4144         u16 opcode;
4145
4146         if (!hdev->sent_cmd)
4147                 return;
4148
4149         sent = (void *) hdev->sent_cmd->data;
4150         opcode = __le16_to_cpu(sent->opcode);
4151         if (opcode == HCI_OP_RESET)
4152                 return;
4153
4154         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4155         if (!skb)
4156                 return;
4157
4158         skb_queue_head(&hdev->cmd_q, skb);
4159         queue_work(hdev->workqueue, &hdev->cmd_work);
4160 }
4161
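/* Called from event processing for Command Complete/Status events.
 * Decides whether the request that @opcode belongs to has now finished
 * and, if so, hands back its completion callback through @req_complete
 * or @req_complete_skb, removing any remaining queued commands that
 * belong to the same request.
 */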
4162 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4163                           hci_req_complete_t *req_complete,
4164                           hci_req_complete_skb_t *req_complete_skb)
4165 {
4166         struct sk_buff *skb;
4167         unsigned long flags;
4168
4169         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4170
4171         /* If the completed command doesn't match the last one that was
4172          * sent, we need to do special handling of it.
4173          */
4174         if (!hci_sent_cmd_data(hdev, opcode)) {
4175                 /* Some CSR based controllers generate a spontaneous
4176                  * reset complete event during init and any pending
4177                  * command will never be completed. In such a case we
4178                  * need to resend whatever was the last sent
4179                  * command.
4180                  */
4181                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4182                         hci_resend_last(hdev);
4183
4184                 return;
4185         }
4186
4187         /* If the command succeeded and there are still more commands in
4188          * this request, the request is not yet complete.
4189          */
4190         if (!status && !hci_req_is_complete(hdev))
4191                 return;
4192
4193         /* If this was the last command in a request, the complete
4194          * callback would be found in hdev->sent_cmd instead of the
4195          * command queue (hdev->cmd_q).
4196          */
4197         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4198                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4199                 return;
4200         }
4201
4202         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4203                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4204                 return;
4205         }
4206
4207         /* Remove all pending commands belonging to this request */
4208         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4209         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4210                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4211                         __skb_queue_head(&hdev->cmd_q, skb);
4212                         break;
4213                 }
4214
4215                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4216                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4217                 else
4218                         *req_complete = bt_cb(skb)->hci.req_complete;
4219                 kfree_skb(skb);
4220         }
4221         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4222 }
4223
4224 static void hci_rx_work(struct work_struct *work)
4225 {
4226         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4227         struct sk_buff *skb;
4228
4229         BT_DBG("%s", hdev->name);
4230
4231         while ((skb = skb_dequeue(&hdev->rx_q))) {
4232                 /* Send copy to monitor */
4233                 hci_send_to_monitor(hdev, skb);
4234
4235                 if (atomic_read(&hdev->promisc)) {
4236                         /* Send copy to the sockets */
4237                         hci_send_to_sock(hdev, skb);
4238                 }
4239
4240                 /* If the device has been opened in HCI_USER_CHANNEL,
4241                  * userspace has exclusive access to the device.
4242                  * When the device is in HCI_INIT state, we still need
4243                  * to pass the data packets on to the driver in order
4244                  * to complete its setup().
4245                  */
4246                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4247                     !test_bit(HCI_INIT, &hdev->flags)) {
4248                         kfree_skb(skb);
4249                         continue;
4250                 }
4251
4252                 if (test_bit(HCI_INIT, &hdev->flags)) {
4253                         /* Don't process data packets in this state. */
4254                         switch (hci_skb_pkt_type(skb)) {
4255                         case HCI_ACLDATA_PKT:
4256                         case HCI_SCODATA_PKT:
4257                                 kfree_skb(skb);
4258                                 continue;
4259                         }
4260                 }
4261
4262                 /* Process frame */
4263                 switch (hci_skb_pkt_type(skb)) {
4264                 case HCI_EVENT_PKT:
4265                         BT_DBG("%s Event packet", hdev->name);
4266                         hci_event_packet(hdev, skb);
4267                         break;
4268
4269                 case HCI_ACLDATA_PKT:
4270                         BT_DBG("%s ACL data packet", hdev->name);
4271                         hci_acldata_packet(hdev, skb);
4272                         break;
4273
4274                 case HCI_SCODATA_PKT:
4275                         BT_DBG("%s SCO data packet", hdev->name);
4276                         hci_scodata_packet(hdev, skb);
4277                         break;
4278
4279                 default:
4280                         kfree_skb(skb);
4281                         break;
4282                 }
4283         }
4284 }
4285
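/* Command scheduler: send one queued command whenever the controller
 * has a free command slot (cmd_cnt, replenished by Command
 * Complete/Status events). A clone is kept in hdev->sent_cmd for
 * completion matching, with cmd_timer as a watchdog in case the
 * controller never answers.
 */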
4286 static void hci_cmd_work(struct work_struct *work)
4287 {
4288         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4289         struct sk_buff *skb;
4290
4291         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4292                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4293
4294         /* Send queued commands */
4295         if (atomic_read(&hdev->cmd_cnt)) {
4296                 skb = skb_dequeue(&hdev->cmd_q);
4297                 if (!skb)
4298                         return;
4299
4300                 kfree_skb(hdev->sent_cmd);
4301
4302                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4303                 if (hdev->sent_cmd) {
4304                         atomic_dec(&hdev->cmd_cnt);
4305                         hci_send_frame(hdev, skb);
4306                         if (test_bit(HCI_RESET, &hdev->flags))
4307                                 cancel_delayed_work(&hdev->cmd_timer);
4308                         else
4309                                 schedule_delayed_work(&hdev->cmd_timer,
4310                                                       HCI_CMD_TIMEOUT);
4311                 } else {
4312                         skb_queue_head(&hdev->cmd_q, skb);
4313                         queue_work(hdev->workqueue, &hdev->cmd_work);
4314                 }
4315         }
4316 }