/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
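
/* Boolean debugfs attributes like dut_mode read back as "Y\n" or "N\n"
 * and accept any input that kstrtobool_from_user() understands ("Y",
 * "N", "1", "0", ...). Assuming debugfs is mounted in the usual place,
 * Device Under Test mode can be toggled from the shell:
 *
 *     echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *     cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The write path issues HCI_OP_ENABLE_DUT_MODE (or HCI_OP_RESET when
 * disabling) synchronously, so it fails with -ENETDOWN while the device
 * is not up.
 */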

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}
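
/* Controller bring-up is staged: hci_init1_req (below) resets the
 * controller and reads its basic identity, hci_init2_req configures
 * BR/EDR and LE basics based on what stage one reported, hci_init3_req
 * programs the event masks and LE list state, and hci_init4_req applies
 * the remaining optional settings. __hci_init() further down runs these
 * stages in order; AMP controllers only go through the first two.
 */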

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms = 20 s) */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
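
/* The event mask built above is a little-endian 64-bit bitfield: event
 * bit N lives at events[N / 8] & (1 << (N % 8)). For example, "Flow
 * Specification Complete" is event mask bit 32 in the Core specification,
 * hence events[4] |= 0x01, and "Inquiry Result with RSSI" is bit 33,
 * hence events[4] |= 0x02.
 */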

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should be
                 * available as well. However, some controllers list max_page
                 * as 0 as long as SSP has not been enabled. To achieve proper
                 * debugging output, force max_page to at least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}
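
/* cp.le mirrors the host's HCI_LE_ENABLED flag, and the command is sent
 * only when that value disagrees with what the controller currently
 * reports via lmp_host_le_capable(), so an already-correct setting is
 * not rewritten on every power-up.
 */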

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Create Connection
                 * command, enable the corresponding event.
                 */
                if (use_ext_conn(hdev))
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI TS spec forbids mixing of legacy and
                         * extended advertising commands, and
                         * READ_ADV_TX_POWER is part of the legacy set. So
                         * do not call it if extended advertising is
                         * supported, otherwise the controller will return
                         * COMMAND_DISALLOWED for extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}
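
/* Stage ordering above matters: hci_init2_req keys off lmp_bredr_capable()
 * and lmp_le_capable(), which depend on the feature bits read during
 * stage one, and hci_init3_req consults hdev->commands[], which is filled
 * in by the Read Local Supported Commands issued in stage two.
 */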

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}
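
/* The four small request builders above back the legacy HCISETSCAN,
 * HCISETAUTH, HCISETENCRYPT and HCISETLINKPOL ioctls; each one is run
 * through hci_req_sync() with the ioctl argument handed over in 'opt'.
 */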

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
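
/* Any reference taken with hci_dev_get() must be balanced with
 * hci_dev_put() once the caller is done with the device, as
 * hci_inquiry() below demonstrates. A minimal caller sketch:
 *
 *     struct hci_dev *hdev = hci_dev_get(0);
 *
 *     if (hdev) {
 *             ... use hdev ...
 *             hci_dev_put(hdev);
 *     }
 */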

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}
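
/* Discovery normally walks STOPPED -> STARTING -> FINDING (-> RESOLVING
 * while remote names are looked up) -> STOPPING -> STOPPED. Userspace is
 * notified through mgmt_discovering() only on the edges that matter:
 * entering FINDING, and returning to STOPPED from any state other than
 * STARTING.
 */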

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
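
/* hci_inquiry_cache_update_resolve() keeps the resolve list sorted by
 * ascending abs(rssi), i.e. strongest signal first, so remote name
 * resolution is attempted for the closest devices before more distant
 * ones. Entries already being resolved (NAME_PENDING) are never
 * displaced by the re-insertion.
 */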

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
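
/* Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on a raw
 * HCI socket. The argument is a struct hci_inquiry_req immediately
 * followed by room for the returned inquiry_info records, as the
 * copy_from_user()/copy_to_user() sequence above shows. A hypothetical
 * sketch (error handling omitted; 0x9e8b33 is the General Inquiry
 * Access Code):
 *
 *     struct {
 *             struct hci_inquiry_req ir;
 *             struct inquiry_info    info[255];
 *     } req = { .ir = { .dev_id  = 0,
 *                       .flags   = IREQ_CACHE_FLUSH,
 *                       .lap     = { 0x33, 0x8b, 0x9e },
 *                       .length  = 8,
 *                       .num_rsp = 255 } };
 *     int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *     ioctl(dd, HCIINQUIRY, &req);
 */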
1365
1366 static int hci_dev_do_open(struct hci_dev *hdev)
1367 {
1368         int ret = 0;
1369
1370         BT_DBG("%s %p", hdev->name, hdev);
1371
1372         hci_req_sync_lock(hdev);
1373
1374         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1375                 ret = -ENODEV;
1376                 goto done;
1377         }
1378
1379         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1380             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1381                 /* Check for rfkill but allow the HCI setup stage to
1382                  * proceed (which in itself doesn't cause any RF activity).
1383                  */
1384                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1385                         ret = -ERFKILL;
1386                         goto done;
1387                 }
1388
1389                 /* Check for valid public address or a configured static
1390                  * random adddress, but let the HCI setup proceed to
1391                  * be able to determine if there is a public address
1392                  * or not.
1393                  *
1394                  * In case of user channel usage, it is not important
1395                  * if a public address or static random address is
1396                  * available.
1397                  *
1398                  * This check is only valid for BR/EDR controllers
1399                  * since AMP controllers do not have an address.
1400                  */
1401                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1402                     hdev->dev_type == HCI_PRIMARY &&
1403                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1404                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1405                         ret = -EADDRNOTAVAIL;
1406                         goto done;
1407                 }
1408         }
1409
1410         if (test_bit(HCI_UP, &hdev->flags)) {
1411                 ret = -EALREADY;
1412                 goto done;
1413         }
1414
1415         if (hdev->open(hdev)) {
1416                 ret = -EIO;
1417                 goto done;
1418         }
1419
1420         set_bit(HCI_RUNNING, &hdev->flags);
1421         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1422
1423         atomic_set(&hdev->cmd_cnt, 1);
1424         set_bit(HCI_INIT, &hdev->flags);
1425
1426         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1427             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1428                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1429
1430                 if (hdev->setup)
1431                         ret = hdev->setup(hdev);
1432
1433                 /* The transport driver can set these quirks before
1434                  * creating the HCI device or in its setup callback.
1435                  *
1436                  * In case any of them is set, the controller has to
1437                  * start up as unconfigured.
1438                  */
1439                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1440                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1441                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1442
1443                 /* For an unconfigured controller it is required to
1444                  * read at least the version information provided by
1445                  * the Read Local Version Information command.
1446                  *
1447                  * If the set_bdaddr driver callback is provided, then
1448                  * also the original Bluetooth public device address
1449                  * will be read using the Read BD Address command.
1450                  */
1451                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1452                         ret = __hci_unconf_init(hdev);
1453         }
1454
1455         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1456                 /* If public address change is configured, ensure that
1457                  * the address gets programmed. If the driver does not
1458                  * support changing the public address, fail the power
1459                  * on procedure.
1460                  */
1461                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1462                     hdev->set_bdaddr)
1463                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1464                 else
1465                         ret = -EADDRNOTAVAIL;
1466         }
1467
1468         if (!ret) {
1469                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1470                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1471                         ret = __hci_init(hdev);
1472                         if (!ret && hdev->post_init)
1473                                 ret = hdev->post_init(hdev);
1474                 }
1475         }
1476
1477         /* If the HCI Reset command is clearing all diagnostic settings,
1478          * then they need to be reprogrammed after the init procedure
1479          * completed.
1480          */
1481         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1482             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1483             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1484                 ret = hdev->set_diag(hdev, true);
1485
1486         clear_bit(HCI_INIT, &hdev->flags);
1487
1488         if (!ret) {
1489                 hci_dev_hold(hdev);
1490                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1491                 hci_adv_instances_set_rpa_expired(hdev, true);
1492                 set_bit(HCI_UP, &hdev->flags);
1493                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1494                 hci_leds_update_powered(hdev, true);
1495                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1496                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1497                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1498                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1499                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1500                     hdev->dev_type == HCI_PRIMARY) {
1501                         ret = __hci_req_hci_power_on(hdev);
1502                         mgmt_power_on(hdev, ret);
1503                 }
1504         } else {
1505                 /* Init failed, cleanup */
1506                 flush_work(&hdev->tx_work);
1507
1508                 /* Since hci_rx_work() may queue new cmd_work, it should
1509                  * be flushed first to avoid an unexpected call of
1510                  * hci_cmd_work().
1511                  */
1512                 flush_work(&hdev->rx_work);
1513                 flush_work(&hdev->cmd_work);
1514
1515                 skb_queue_purge(&hdev->cmd_q);
1516                 skb_queue_purge(&hdev->rx_q);
1517
1518                 if (hdev->flush)
1519                         hdev->flush(hdev);
1520
1521                 if (hdev->sent_cmd) {
1522                         kfree_skb(hdev->sent_cmd);
1523                         hdev->sent_cmd = NULL;
1524                 }
1525
1526                 clear_bit(HCI_RUNNING, &hdev->flags);
1527                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1528
1529                 hdev->close(hdev);
1530                 hdev->flags &= BIT(HCI_RAW);
1531         }
1532
1533 done:
1534         hci_req_sync_unlock(hdev);
1535         return ret;
1536 }
1537
1538 /* ---- HCI ioctl helpers ---- */
1539
1540 int hci_dev_open(__u16 dev)
1541 {
1542         struct hci_dev *hdev;
1543         int err;
1544
1545         hdev = hci_dev_get(dev);
1546         if (!hdev)
1547                 return -ENODEV;
1548
1549         /* Devices that are marked as unconfigured can only be powered
1550          * up as user channel. Trying to bring them up as normal devices
1551          * will result in a failure. Only user channel operation is
1552          * possible.
1553          *
1554          * When this function is called for a user channel, the flag
1555          * HCI_USER_CHANNEL will be set first before attempting to
1556          * open the device.
1557          */
1558         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1559             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1560                 err = -EOPNOTSUPP;
1561                 goto done;
1562         }
1563
1564         /* We need to ensure that no other power on/off work is pending
1565          * before proceeding to call hci_dev_do_open. This is
1566          * particularly important if the setup procedure has not yet
1567          * completed.
1568          */
1569         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1570                 cancel_delayed_work(&hdev->power_off);
1571
1572         /* After this call it is guaranteed that the setup procedure
1573          * has finished. This means that error conditions like RFKILL
1574          * or no valid public or static random address apply.
1575          */
1576         flush_workqueue(hdev->req_workqueue);
1577
1578         /* For controllers not using the management interface and that
1579          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1580          * so that pairing works for them. Once the management interface
1581          * is in use this bit will be cleared again and userspace has
1582          * to explicitly enable it.
1583          */
1584         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1585             !hci_dev_test_flag(hdev, HCI_MGMT))
1586                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1587
1588         err = hci_dev_do_open(hdev);
1589
1590 done:
1591         hci_dev_put(hdev);
1592         return err;
1593 }
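
/* A minimal userspace sketch (hypothetical caller) of the legacy
 * bring-up path handled above, assuming BlueZ's <bluetooth/bluetooth.h>
 * and <bluetooth/hci.h> definitions:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 *	if (ctl < 0)
 *		perror("socket");
 *	else if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 *
 * The ioctl argument is the dev_id, so 0 means hci0. HCIDEVUP lands in
 * hci_dev_open() above, and the matching HCIDEVDOWN ioctl ends up in
 * hci_dev_close() below.
 */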
1594
1595 /* This function requires the caller holds hdev->lock */
1596 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1597 {
1598         struct hci_conn_params *p;
1599
1600         list_for_each_entry(p, &hdev->le_conn_params, list) {
1601                 if (p->conn) {
1602                         hci_conn_drop(p->conn);
1603                         hci_conn_put(p->conn);
1604                         p->conn = NULL;
1605                 }
1606                 list_del_init(&p->action);
1607         }
1608
1609         BT_DBG("All LE pending actions cleared");
1610 }
1611
1612 int hci_dev_do_close(struct hci_dev *hdev)
1613 {
1614         bool auto_off;
1615
1616         BT_DBG("%s %p", hdev->name, hdev);
1617
1618         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1619             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1620             test_bit(HCI_UP, &hdev->flags)) {
1621                 /* Execute vendor specific shutdown routine */
1622                 if (hdev->shutdown)
1623                         hdev->shutdown(hdev);
1624         }
1625
1626         cancel_delayed_work(&hdev->power_off);
1627
1628         hci_request_cancel_all(hdev);
1629         hci_req_sync_lock(hdev);
1630
1639         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1640                 cancel_delayed_work_sync(&hdev->cmd_timer);
1641                 hci_req_sync_unlock(hdev);
1642                 return 0;
1643         }
1644
1645         hci_leds_update_powered(hdev, false);
1646
1647         /* Flush RX and TX works */
1648         flush_work(&hdev->tx_work);
1649         flush_work(&hdev->rx_work);
1650
1651         if (hdev->discov_timeout > 0) {
1652                 hdev->discov_timeout = 0;
1653                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1654                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1655         }
1656
1657         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1658                 cancel_delayed_work(&hdev->service_cache);
1659
1660         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1661                 struct adv_info *adv_instance;
1662
1663                 cancel_delayed_work_sync(&hdev->rpa_expired);
1664
1665                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1666                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1667         }
1668
1669         /* Avoid potential lockdep warnings from the *_flush() calls by
1670          * ensuring the workqueue is empty up front.
1671          */
1672         drain_workqueue(hdev->workqueue);
1673
1674         hci_dev_lock(hdev);
1675
1676         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1677
1678         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1679
1680         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1681             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1682             hci_dev_test_flag(hdev, HCI_MGMT))
1683                 __mgmt_power_off(hdev);
1684
1685         hci_inquiry_cache_flush(hdev);
1686         hci_pend_le_actions_clear(hdev);
1687         hci_conn_hash_flush(hdev);
1688         hci_dev_unlock(hdev);
1689
1690         smp_unregister(hdev);
1691
1692         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1693
1694         if (hdev->flush)
1695                 hdev->flush(hdev);
1696
1697         /* Reset device */
1698         skb_queue_purge(&hdev->cmd_q);
1699         atomic_set(&hdev->cmd_cnt, 1);
1700         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1701             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1702                 set_bit(HCI_INIT, &hdev->flags);
1703                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1704                 clear_bit(HCI_INIT, &hdev->flags);
1705         }
1706
1707         /* Flush cmd work */
1708         flush_work(&hdev->cmd_work);
1709
1710         /* Drop queues */
1711         skb_queue_purge(&hdev->rx_q);
1712         skb_queue_purge(&hdev->cmd_q);
1713         skb_queue_purge(&hdev->raw_q);
1714
1715         /* Drop last sent command */
1716         if (hdev->sent_cmd) {
1717                 cancel_delayed_work_sync(&hdev->cmd_timer);
1718                 kfree_skb(hdev->sent_cmd);
1719                 hdev->sent_cmd = NULL;
1720         }
1721
1722         clear_bit(HCI_RUNNING, &hdev->flags);
1723         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1724
1725         /* After this point our queues are empty
1726          * and no tasks are scheduled. */
1727         hdev->close(hdev);
1728
1729         /* Clear flags */
1730         hdev->flags &= BIT(HCI_RAW);
1731         hci_dev_clear_volatile_flags(hdev);
1732
1733         /* Controller radio is available but is currently powered down */
1734         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1735
1736         memset(hdev->eir, 0, sizeof(hdev->eir));
1737         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1738         bacpy(&hdev->random_addr, BDADDR_ANY);
1739
1740         hci_req_sync_unlock(hdev);
1741
1742         hci_dev_put(hdev);
1743         return 0;
1744 }
1745
1746 int hci_dev_close(__u16 dev)
1747 {
1748         struct hci_dev *hdev;
1749         int err;
1750
1751         hdev = hci_dev_get(dev);
1752         if (!hdev)
1753                 return -ENODEV;
1754
1755         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1756                 err = -EBUSY;
1757                 goto done;
1758         }
1759
1760         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1761                 cancel_delayed_work(&hdev->power_off);
1762
1763         err = hci_dev_do_close(hdev);
1764
1765 done:
1766         hci_dev_put(hdev);
1767         return err;
1768 }
1769
1770 static int hci_dev_do_reset(struct hci_dev *hdev)
1771 {
1772         int ret;
1773
1774         BT_DBG("%s %p", hdev->name, hdev);
1775
1776         hci_req_sync_lock(hdev);
1777
1778         /* Drop queues */
1779         skb_queue_purge(&hdev->rx_q);
1780         skb_queue_purge(&hdev->cmd_q);
1781
1782         /* Avoid potential lockdep warnings from the *_flush() calls by
1783          * ensuring the workqueue is empty up front.
1784          */
1785         drain_workqueue(hdev->workqueue);
1786
1787         hci_dev_lock(hdev);
1788         hci_inquiry_cache_flush(hdev);
1789         hci_conn_hash_flush(hdev);
1790         hci_dev_unlock(hdev);
1791
1792         if (hdev->flush)
1793                 hdev->flush(hdev);
1794
1795         atomic_set(&hdev->cmd_cnt, 1);
1796         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1797
1798         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1799
1800         hci_req_sync_unlock(hdev);
1801         return ret;
1802 }
1803
1804 int hci_dev_reset(__u16 dev)
1805 {
1806         struct hci_dev *hdev;
1807         int err;
1808
1809         hdev = hci_dev_get(dev);
1810         if (!hdev)
1811                 return -ENODEV;
1812
1813         if (!test_bit(HCI_UP, &hdev->flags)) {
1814                 err = -ENETDOWN;
1815                 goto done;
1816         }
1817
1818         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1819                 err = -EBUSY;
1820                 goto done;
1821         }
1822
1823         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1824                 err = -EOPNOTSUPP;
1825                 goto done;
1826         }
1827
1828         err = hci_dev_do_reset(hdev);
1829
1830 done:
1831         hci_dev_put(hdev);
1832         return err;
1833 }
1834
1835 int hci_dev_reset_stat(__u16 dev)
1836 {
1837         struct hci_dev *hdev;
1838         int ret = 0;
1839
1840         hdev = hci_dev_get(dev);
1841         if (!hdev)
1842                 return -ENODEV;
1843
1844         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1845                 ret = -EBUSY;
1846                 goto done;
1847         }
1848
1849         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1850                 ret = -EOPNOTSUPP;
1851                 goto done;
1852         }
1853
1854         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1855
1856 done:
1857         hci_dev_put(hdev);
1858         return ret;
1859 }
1860
1861 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1862 {
1863         bool conn_changed, discov_changed;
1864
1865         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1866
1867         if ((scan & SCAN_PAGE))
1868                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1869                                                           HCI_CONNECTABLE);
1870         else
1871                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1872                                                            HCI_CONNECTABLE);
1873
1874         if ((scan & SCAN_INQUIRY)) {
1875                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1876                                                             HCI_DISCOVERABLE);
1877         } else {
1878                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1879                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1880                                                              HCI_DISCOVERABLE);
1881         }
1882
1883         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1884                 return;
1885
1886         if (conn_changed || discov_changed) {
1887                 /* In case this was disabled through mgmt */
1888                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1889
1890                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1891                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1892
1893                 mgmt_new_settings(hdev);
1894         }
1895 }
1896
1897 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1898 {
1899         struct hci_dev *hdev;
1900         struct hci_dev_req dr;
1901         int err = 0;
1902
1903         if (copy_from_user(&dr, arg, sizeof(dr)))
1904                 return -EFAULT;
1905
1906         hdev = hci_dev_get(dr.dev_id);
1907         if (!hdev)
1908                 return -ENODEV;
1909
1910         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1911                 err = -EBUSY;
1912                 goto done;
1913         }
1914
1915         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1916                 err = -EOPNOTSUPP;
1917                 goto done;
1918         }
1919
1920         if (hdev->dev_type != HCI_PRIMARY) {
1921                 err = -EOPNOTSUPP;
1922                 goto done;
1923         }
1924
1925         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1926                 err = -EOPNOTSUPP;
1927                 goto done;
1928         }
1929
1930         switch (cmd) {
1931         case HCISETAUTH:
1932                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1933                                    HCI_INIT_TIMEOUT, NULL);
1934                 break;
1935
1936         case HCISETENCRYPT:
1937                 if (!lmp_encrypt_capable(hdev)) {
1938                         err = -EOPNOTSUPP;
1939                         break;
1940                 }
1941
1942                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1943                         /* Auth must be enabled first */
1944                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1945                                            HCI_INIT_TIMEOUT, NULL);
1946                         if (err)
1947                                 break;
1948                 }
1949
1950                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1951                                    HCI_INIT_TIMEOUT, NULL);
1952                 break;
1953
1954         case HCISETSCAN:
1955                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1956                                    HCI_INIT_TIMEOUT, NULL);
1957
1958                 /* Ensure that the connectable and discoverable states
1959                  * get correctly modified as this was a non-mgmt change.
1960                  */
1961                 if (!err)
1962                         hci_update_scan_state(hdev, dr.dev_opt);
1963                 break;
1964
1965         case HCISETLINKPOL:
1966                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1967                                    HCI_INIT_TIMEOUT, NULL);
1968                 break;
1969
1970         case HCISETLINKMODE:
1971                 hdev->link_mode = ((__u16) dr.dev_opt) &
1972                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1973                 break;
1974
1975         case HCISETPTYPE:
1976                 if (hdev->pkt_type == (__u16) dr.dev_opt)
1977                         break;
1978
1979                 hdev->pkt_type = (__u16) dr.dev_opt;
1980                 mgmt_phy_configuration_changed(hdev, NULL);
1981                 break;
1982
1983         case HCISETACLMTU:
1984                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1985                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1986                 break;
1987
1988         case HCISETSCOMTU:
1989                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1990                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1991                 break;
1992
1993         default:
1994                 err = -EINVAL;
1995                 break;
1996         }
1997
1998 done:
1999         hci_dev_put(hdev);
2000         return err;
2001 }
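
/* A minimal userspace sketch for the HCISETSCAN case above (ctl being a
 * raw AF_BLUETOOTH/BTPROTO_HCI socket as in the earlier sketch, and
 * assuming BlueZ's <bluetooth/hci.h> for struct hci_dev_req and the
 * SCAN_* constants, where SCAN_INQUIRY is 0x01 and SCAN_PAGE is 0x02):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long)&dr) < 0)
 *		perror("HCISETSCAN");
 *
 * With both bits set, hci_update_scan_state() above marks the device
 * connectable and discoverable. Note that HCISETACLMTU/HCISETSCOMTU
 * pack two __u16 values into dev_opt via pointer casts, so the layout
 * is host-endian: on little-endian machines the MTU sits in the upper
 * half and the packet count in the lower half.
 */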
2002
2003 int hci_get_dev_list(void __user *arg)
2004 {
2005         struct hci_dev *hdev;
2006         struct hci_dev_list_req *dl;
2007         struct hci_dev_req *dr;
2008         int n = 0, size, err;
2009         __u16 dev_num;
2010
2011         if (get_user(dev_num, (__u16 __user *) arg))
2012                 return -EFAULT;
2013
2014         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2015                 return -EINVAL;
2016
2017         size = sizeof(*dl) + dev_num * sizeof(*dr);
2018
2019         dl = kzalloc(size, GFP_KERNEL);
2020         if (!dl)
2021                 return -ENOMEM;
2022
2023         dr = dl->dev_req;
2024
2025         read_lock(&hci_dev_list_lock);
2026         list_for_each_entry(hdev, &hci_dev_list, list) {
2027                 unsigned long flags = hdev->flags;
2028
2029                 /* When the auto-off is configured, it means the transport
2030                  * is running, but in that case still indicate that the
2031                  * device is actually down.
2032                  */
2033                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2034                         flags &= ~BIT(HCI_UP);
2035
2036                 (dr + n)->dev_id  = hdev->id;
2037                 (dr + n)->dev_opt = flags;
2038
2039                 if (++n >= dev_num)
2040                         break;
2041         }
2042         read_unlock(&hci_dev_list_lock);
2043
2044         dl->dev_num = n;
2045         size = sizeof(*dl) + n * sizeof(*dr);
2046
2047         err = copy_to_user(arg, dl, size);
2048         kfree(dl);
2049
2050         return err ? -EFAULT : 0;
2051 }
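
/* A minimal userspace sketch for the list ioctl above, assuming BlueZ's
 * <bluetooth/hci.h> definitions of HCIGETDEVLIST, HCI_MAX_DEV and
 * struct hci_dev_list_req (dev_req is a flexible array that follows the
 * dev_num header):
 *
 *	char buf[sizeof(struct hci_dev_list_req) +
 *		 HCI_MAX_DEV * sizeof(struct hci_dev_req)] = { 0 };
 *	struct hci_dev_list_req *dl = (void *)buf;
 *	int i;
 *
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (ioctl(ctl, HCIGETDEVLIST, (unsigned long)dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 */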
2052
2053 int hci_get_dev_info(void __user *arg)
2054 {
2055         struct hci_dev *hdev;
2056         struct hci_dev_info di;
2057         unsigned long flags;
2058         int err = 0;
2059
2060         if (copy_from_user(&di, arg, sizeof(di)))
2061                 return -EFAULT;
2062
2063         hdev = hci_dev_get(di.dev_id);
2064         if (!hdev)
2065                 return -ENODEV;
2066
2067         /* When the auto-off is configured, it means the transport
2068          * is running, but in that case still indicate that the
2069          * device is actually down.
2070          */
2071         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2072                 flags = hdev->flags & ~BIT(HCI_UP);
2073         else
2074                 flags = hdev->flags;
2075
2076         strcpy(di.name, hdev->name);
2077         di.bdaddr   = hdev->bdaddr;
2078         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2079         di.flags    = flags;
2080         di.pkt_type = hdev->pkt_type;
2081         if (lmp_bredr_capable(hdev)) {
2082                 di.acl_mtu  = hdev->acl_mtu;
2083                 di.acl_pkts = hdev->acl_pkts;
2084                 di.sco_mtu  = hdev->sco_mtu;
2085                 di.sco_pkts = hdev->sco_pkts;
2086         } else {
2087                 di.acl_mtu  = hdev->le_mtu;
2088                 di.acl_pkts = hdev->le_pkts;
2089                 di.sco_mtu  = 0;
2090                 di.sco_pkts = 0;
2091         }
2092         di.link_policy = hdev->link_policy;
2093         di.link_mode   = hdev->link_mode;
2094
2095         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2096         memcpy(&di.features, &hdev->features, sizeof(di.features));
2097
2098         if (copy_to_user(arg, &di, sizeof(di)))
2099                 err = -EFAULT;
2100
2101         hci_dev_put(hdev);
2102
2103         return err;
2104 }
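
/* And the single-device variant, under the same assumptions:
 *
 *	struct hci_dev_info di = { .dev_id = 0 };
 *
 *	if (ioctl(ctl, HCIGETDEVINFO, (unsigned long)&di) == 0)
 *		printf("%s acl_mtu %u acl_pkts %u\n",
 *		       di.name, di.acl_mtu, di.acl_pkts);
 */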
2105
2106 /* ---- Interface to HCI drivers ---- */
2107
2108 static int hci_rfkill_set_block(void *data, bool blocked)
2109 {
2110         struct hci_dev *hdev = data;
2111
2112         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2113
2114         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2115                 return -EBUSY;
2116
2117         if (blocked) {
2118                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2119                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2120                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2121                         hci_dev_do_close(hdev);
2122         } else {
2123                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2124         }
2125
2126         return 0;
2127 }
2128
2129 static const struct rfkill_ops hci_rfkill_ops = {
2130         .set_block = hci_rfkill_set_block,
2131 };
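
/* A minimal userspace sketch of how this callback gets exercised:
 * soft-blocking every Bluetooth adapter through /dev/rfkill (assuming
 * the struct rfkill_event layout from <linux/rfkill.h>) makes the
 * rfkill core call hci_rfkill_set_block() with blocked set:
 *
 *	struct rfkill_event ev = {
 *		.type = RFKILL_TYPE_BLUETOOTH,
 *		.op   = RFKILL_OP_CHANGE_ALL,
 *		.soft = 1,
 *	};
 *	int fd = open("/dev/rfkill", O_WRONLY);
 *
 *	if (fd < 0 || write(fd, &ev, sizeof(ev)) != sizeof(ev))
 *		perror("rfkill");
 */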
2132
2133 static void hci_power_on(struct work_struct *work)
2134 {
2135         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2136         int err;
2137
2138         BT_DBG("%s", hdev->name);
2139
2140         if (test_bit(HCI_UP, &hdev->flags) &&
2141             hci_dev_test_flag(hdev, HCI_MGMT) &&
2142             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2143                 cancel_delayed_work(&hdev->power_off);
2144                 hci_req_sync_lock(hdev);
2145                 err = __hci_req_hci_power_on(hdev);
2146                 hci_req_sync_unlock(hdev);
2147                 mgmt_power_on(hdev, err);
2148                 return;
2149         }
2150
2151         err = hci_dev_do_open(hdev);
2152         if (err < 0) {
2153                 hci_dev_lock(hdev);
2154                 mgmt_set_powered_failed(hdev, err);
2155                 hci_dev_unlock(hdev);
2156                 return;
2157         }
2158
2159         /* During the HCI setup phase, a few error conditions are
2160          * ignored and they need to be checked now. If they are still
2161          * valid, it is important to turn the device back off.
2162          */
2163         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2164             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2165             (hdev->dev_type == HCI_PRIMARY &&
2166              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2167              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2168                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2169                 hci_dev_do_close(hdev);
2170         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2171                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2172                                    HCI_AUTO_OFF_TIMEOUT);
2173         }
2174
2175         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2176                 /* For unconfigured devices, set the HCI_RAW flag
2177                  * so that userspace can easily identify them.
2178                  */
2179                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2180                         set_bit(HCI_RAW, &hdev->flags);
2181
2182                 /* For fully configured devices, this will send
2183                  * the Index Added event. For unconfigured devices,
2184                  * it will send the Unconfigured Index Added event.
2185                  *
2186                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2187                  * and no event will be sent.
2188                  */
2189                 mgmt_index_added(hdev);
2190         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2191                 /* When the controller is now configured, then it
2192                  * is important to clear the HCI_RAW flag.
2193                  */
2194                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2195                         clear_bit(HCI_RAW, &hdev->flags);
2196
2197                 /* Powering on the controller with HCI_CONFIG set only
2198                  * happens with the transition from unconfigured to
2199                  * configured. This will send the Index Added event.
2200                  */
2201                 mgmt_index_added(hdev);
2202         }
2203 }
2204
2205 static void hci_power_off(struct work_struct *work)
2206 {
2207         struct hci_dev *hdev = container_of(work, struct hci_dev,
2208                                             power_off.work);
2209
2210         BT_DBG("%s", hdev->name);
2211
2212         hci_dev_do_close(hdev);
2213 }
2214
2215 static void hci_error_reset(struct work_struct *work)
2216 {
2217         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2218
2219         BT_DBG("%s", hdev->name);
2220
2221         if (hdev->hw_error)
2222                 hdev->hw_error(hdev, hdev->hw_error_code);
2223         else
2224                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2225
2226         if (hci_dev_do_close(hdev))
2227                 return;
2228
2229         hci_dev_do_open(hdev);
2230 }
2231
2232 void hci_uuids_clear(struct hci_dev *hdev)
2233 {
2234         struct bt_uuid *uuid, *tmp;
2235
2236         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2237                 list_del(&uuid->list);
2238                 kfree(uuid);
2239         }
2240 }
2241
2242 void hci_link_keys_clear(struct hci_dev *hdev)
2243 {
2244         struct link_key *key;
2245
2246         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2247                 list_del_rcu(&key->list);
2248                 kfree_rcu(key, rcu);
2249         }
2250 }
2251
2252 void hci_smp_ltks_clear(struct hci_dev *hdev)
2253 {
2254         struct smp_ltk *k;
2255
2256         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2257                 list_del_rcu(&k->list);
2258                 kfree_rcu(k, rcu);
2259         }
2260 }
2261
2262 void hci_smp_irks_clear(struct hci_dev *hdev)
2263 {
2264         struct smp_irk *k;
2265
2266         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2267                 list_del_rcu(&k->list);
2268                 kfree_rcu(k, rcu);
2269         }
2270 }
2271
2272 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2273 {
2274         struct link_key *k;
2275
2276         rcu_read_lock();
2277         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2278                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2279                         rcu_read_unlock();
2280                         return k;
2281                 }
2282         }
2283         rcu_read_unlock();
2284
2285         return NULL;
2286 }
2287
2288 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2289                                u8 key_type, u8 old_key_type)
2290 {
2291         /* Legacy key */
2292         if (key_type < 0x03)
2293                 return true;
2294
2295         /* Debug keys are insecure so don't store them persistently */
2296         if (key_type == HCI_LK_DEBUG_COMBINATION)
2297                 return false;
2298
2299         /* Changed combination key and there's no previous one */
2300         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2301                 return false;
2302
2303         /* Security mode 3 case */
2304         if (!conn)
2305                 return true;
2306
2307         /* BR/EDR key derived using SC from an LE link */
2308         if (conn->type == LE_LINK)
2309                 return true;
2310
2311         /* Neither the local nor the remote side requested no-bonding */
2312         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2313                 return true;
2314
2315         /* Local side had dedicated bonding as requirement */
2316         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2317                 return true;
2318
2319         /* Remote side had dedicated bonding as requirement */
2320         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2321                 return true;
2322
2323         /* If none of the above criteria match, then don't store the key
2324          * persistently */
2325         return false;
2326 }
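
/* A worked example of the checks above: an Unauthenticated Combination
 * Key (type 0x04) created while both sides requested General Bonding
 * (auth requirement 0x04) passes the bonding test and is stored, while
 * the same key type negotiated with No Bonding on both ends (0x00 or
 * 0x01) falls through every check and is not kept.
 */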
2327
2328 static u8 ltk_role(u8 type)
2329 {
2330         if (type == SMP_LTK)
2331                 return HCI_ROLE_MASTER;
2332
2333         return HCI_ROLE_SLAVE;
2334 }
2335
2336 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2337                              u8 addr_type, u8 role)
2338 {
2339         struct smp_ltk *k;
2340
2341         rcu_read_lock();
2342         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2343                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2344                         continue;
2345
2346                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2347                         rcu_read_unlock();
2348                         return k;
2349                 }
2350         }
2351         rcu_read_unlock();
2352
2353         return NULL;
2354 }
2355
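/* A Resolvable Private Address is prand (upper 24 bits, with the two
 * most significant bits set to 0b01) followed by hash = ah(IRK, prand),
 * so resolving one means recomputing the hash against every stored IRK.
 * The lookup below therefore tries the cached ->rpa values first and
 * only falls back to the cryptographic check in smp_irk_matches().
 */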
2356 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2357 {
2358         struct smp_irk *irk;
2359
2360         rcu_read_lock();
2361         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2362                 if (!bacmp(&irk->rpa, rpa)) {
2363                         rcu_read_unlock();
2364                         return irk;
2365                 }
2366         }
2367
2368         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2369                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2370                         bacpy(&irk->rpa, rpa);
2371                         rcu_read_unlock();
2372                         return irk;
2373                 }
2374         }
2375         rcu_read_unlock();
2376
2377         return NULL;
2378 }
2379
2380 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2381                                      u8 addr_type)
2382 {
2383         struct smp_irk *irk;
2384
2385         /* Identity Address must be public or static random. bdaddr_t is
              * stored little-endian, so b[5] is the most significant byte;
              * static random addresses have the two top bits set (0b11).
              */
2386         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2387                 return NULL;
2388
2389         rcu_read_lock();
2390         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2391                 if (addr_type == irk->addr_type &&
2392                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2393                         rcu_read_unlock();
2394                         return irk;
2395                 }
2396         }
2397         rcu_read_unlock();
2398
2399         return NULL;
2400 }
2401
2402 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2403                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2404                                   u8 pin_len, bool *persistent)
2405 {
2406         struct link_key *key, *old_key;
2407         u8 old_key_type;
2408
2409         old_key = hci_find_link_key(hdev, bdaddr);
2410         if (old_key) {
2411                 old_key_type = old_key->type;
2412                 key = old_key;
2413         } else {
2414                 old_key_type = conn ? conn->key_type : 0xff;
2415                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2416                 if (!key)
2417                         return NULL;
2418                 list_add_rcu(&key->list, &hdev->link_keys);
2419         }
2420
2421         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2422
2423         /* Some buggy controller combinations generate a changed
2424          * combination key for legacy pairing even when there's no
2425          * previous key */
2426         if (type == HCI_LK_CHANGED_COMBINATION &&
2427             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2428                 type = HCI_LK_COMBINATION;
2429                 if (conn)
2430                         conn->key_type = type;
2431         }
2432
2433         bacpy(&key->bdaddr, bdaddr);
2434         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2435         key->pin_len = pin_len;
2436
2437         if (type == HCI_LK_CHANGED_COMBINATION)
2438                 key->type = old_key_type;
2439         else
2440                 key->type = type;
2441
2442         if (persistent)
2443                 *persistent = hci_persistent_key(hdev, conn, type,
2444                                                  old_key_type);
2445
2446         return key;
2447 }
2448
2449 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2450                             u8 addr_type, u8 type, u8 authenticated,
2451                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2452 {
2453         struct smp_ltk *key, *old_key;
2454         u8 role = ltk_role(type);
2455
2456         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2457         if (old_key)
2458                 key = old_key;
2459         else {
2460                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2461                 if (!key)
2462                         return NULL;
2463                 list_add_rcu(&key->list, &hdev->long_term_keys);
2464         }
2465
2466         bacpy(&key->bdaddr, bdaddr);
2467         key->bdaddr_type = addr_type;
2468         memcpy(key->val, tk, sizeof(key->val));
2469         key->authenticated = authenticated;
2470         key->ediv = ediv;
2471         key->rand = rand;
2472         key->enc_size = enc_size;
2473         key->type = type;
2474
2475         return key;
2476 }
2477
2478 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2479                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2480 {
2481         struct smp_irk *irk;
2482
2483         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2484         if (!irk) {
2485                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2486                 if (!irk)
2487                         return NULL;
2488
2489                 bacpy(&irk->bdaddr, bdaddr);
2490                 irk->addr_type = addr_type;
2491
2492                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2493         }
2494
2495         memcpy(irk->val, val, 16);
2496         bacpy(&irk->rpa, rpa);
2497
2498         return irk;
2499 }
2500
2501 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2502 {
2503         struct link_key *key;
2504
2505         key = hci_find_link_key(hdev, bdaddr);
2506         if (!key)
2507                 return -ENOENT;
2508
2509         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2510
2511         list_del_rcu(&key->list);
2512         kfree_rcu(key, rcu);
2513
2514         return 0;
2515 }
2516
2517 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2518 {
2519         struct smp_ltk *k;
2520         int removed = 0;
2521
2522         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2523                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2524                         continue;
2525
2526                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2527
2528                 list_del_rcu(&k->list);
2529                 kfree_rcu(k, rcu);
2530                 removed++;
2531         }
2532
2533         return removed ? 0 : -ENOENT;
2534 }
2535
2536 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2537 {
2538         struct smp_irk *k;
2539
2540         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2541                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2542                         continue;
2543
2544                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2545
2546                 list_del_rcu(&k->list);
2547                 kfree_rcu(k, rcu);
2548         }
2549 }
2550
2551 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2552 {
2553         struct smp_ltk *k;
2554         struct smp_irk *irk;
2555         u8 addr_type;
2556
2557         if (type == BDADDR_BREDR) {
2558                 if (hci_find_link_key(hdev, bdaddr))
2559                         return true;
2560                 return false;
2561         }
2562
2563         /* Convert to HCI addr type which struct smp_ltk uses */
2564         if (type == BDADDR_LE_PUBLIC)
2565                 addr_type = ADDR_LE_DEV_PUBLIC;
2566         else
2567                 addr_type = ADDR_LE_DEV_RANDOM;
2568
2569         irk = hci_get_irk(hdev, bdaddr, addr_type);
2570         if (irk) {
2571                 bdaddr = &irk->bdaddr;
2572                 addr_type = irk->addr_type;
2573         }
2574
2575         rcu_read_lock();
2576         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2577                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2578                         rcu_read_unlock();
2579                         return true;
2580                 }
2581         }
2582         rcu_read_unlock();
2583
2584         return false;
2585 }
2586
2587 /* HCI command timer function */
2588 static void hci_cmd_timeout(struct work_struct *work)
2589 {
2590         struct hci_dev *hdev = container_of(work, struct hci_dev,
2591                                             cmd_timer.work);
2592
2593         if (hdev->sent_cmd) {
2594                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2595                 u16 opcode = __le16_to_cpu(sent->opcode);
2596
2597                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2598         } else {
2599                 bt_dev_err(hdev, "command tx timeout");
2600         }
2601
2602         atomic_set(&hdev->cmd_cnt, 1);
2603         queue_work(hdev->workqueue, &hdev->cmd_work);
2604 }
2605
2606 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2607                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2608 {
2609         struct oob_data *data;
2610
2611         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2612                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2613                         continue;
2614                 if (data->bdaddr_type != bdaddr_type)
2615                         continue;
2616                 return data;
2617         }
2618
2619         return NULL;
2620 }
2621
2622 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2623                                u8 bdaddr_type)
2624 {
2625         struct oob_data *data;
2626
2627         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2628         if (!data)
2629                 return -ENOENT;
2630
2631         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2632
2633         list_del(&data->list);
2634         kfree(data);
2635
2636         return 0;
2637 }
2638
2639 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2640 {
2641         struct oob_data *data, *n;
2642
2643         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2644                 list_del(&data->list);
2645                 kfree(data);
2646         }
2647 }
2648
2649 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2650                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2651                             u8 *hash256, u8 *rand256)
2652 {
2653         struct oob_data *data;
2654
2655         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2656         if (!data) {
2657                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2658                 if (!data)
2659                         return -ENOMEM;
2660
2661                 bacpy(&data->bdaddr, bdaddr);
2662                 data->bdaddr_type = bdaddr_type;
2663                 list_add(&data->list, &hdev->remote_oob_data);
2664         }
2665
2666         if (hash192 && rand192) {
2667                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2668                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2669                 if (hash256 && rand256)
2670                         data->present = 0x03;
2671         } else {
2672                 memset(data->hash192, 0, sizeof(data->hash192));
2673                 memset(data->rand192, 0, sizeof(data->rand192));
2674                 if (hash256 && rand256)
2675                         data->present = 0x02;
2676                 else
2677                         data->present = 0x00;
2678         }
2679
2680         if (hash256 && rand256) {
2681                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2682                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2683         } else {
2684                 memset(data->hash256, 0, sizeof(data->hash256));
2685                 memset(data->rand256, 0, sizeof(data->rand256));
2686                 if (hash192 && rand192)
2687                         data->present = 0x01;
2688         }
2689
2690         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2691
2692         return 0;
2693 }
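
/* The ->present bitmask assigned above encodes which value pairs are
 * valid: 0x01 means P-192 data only, 0x02 means P-256 data only, 0x03
 * means both and 0x00 means none, which lets later consumers pick the
 * right hash/randomizer pair during Secure Simple Pairing.
 */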
2694
2695 /* This function requires the caller holds hdev->lock */
2696 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2697 {
2698         struct adv_info *adv_instance;
2699
2700         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2701                 if (adv_instance->instance == instance)
2702                         return adv_instance;
2703         }
2704
2705         return NULL;
2706 }
2707
2708 /* This function requires the caller holds hdev->lock */
2709 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2710 {
2711         struct adv_info *cur_instance;
2712
2713         cur_instance = hci_find_adv_instance(hdev, instance);
2714         if (!cur_instance)
2715                 return NULL;
2716
2717         if (cur_instance == list_last_entry(&hdev->adv_instances,
2718                                             struct adv_info, list))
2719                 return list_first_entry(&hdev->adv_instances,
2720                                         struct adv_info, list);
2721         else
2722                 return list_next_entry(cur_instance, list);
2723 }
2724
2725 /* This function requires the caller holds hdev->lock */
2726 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2727 {
2728         struct adv_info *adv_instance;
2729
2730         adv_instance = hci_find_adv_instance(hdev, instance);
2731         if (!adv_instance)
2732                 return -ENOENT;
2733
2734         BT_DBG("%s removing instance %d", hdev->name, instance);
2735
2736         if (hdev->cur_adv_instance == instance) {
2737                 if (hdev->adv_instance_timeout) {
2738                         cancel_delayed_work(&hdev->adv_instance_expire);
2739                         hdev->adv_instance_timeout = 0;
2740                 }
2741                 hdev->cur_adv_instance = 0x00;
2742         }
2743
2744         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2745
2746         list_del(&adv_instance->list);
2747         kfree(adv_instance);
2748
2749         hdev->adv_instance_cnt--;
2750
2751         return 0;
2752 }
2753
2754 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2755 {
2756         struct adv_info *adv_instance, *n;
2757
2758         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2759                 adv_instance->rpa_expired = rpa_expired;
2760 }
2761
2762 /* This function requires the caller holds hdev->lock */
2763 void hci_adv_instances_clear(struct hci_dev *hdev)
2764 {
2765         struct adv_info *adv_instance, *n;
2766
2767         if (hdev->adv_instance_timeout) {
2768                 cancel_delayed_work(&hdev->adv_instance_expire);
2769                 hdev->adv_instance_timeout = 0;
2770         }
2771
2772         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2773                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2774                 list_del(&adv_instance->list);
2775                 kfree(adv_instance);
2776         }
2777
2778         hdev->adv_instance_cnt = 0;
2779         hdev->cur_adv_instance = 0x00;
2780 }
2781
2782 static void adv_instance_rpa_expired(struct work_struct *work)
2783 {
2784         struct adv_info *adv_instance = container_of(work, struct adv_info,
2785                                                      rpa_expired_cb.work);
2786
2787         BT_DBG("");
2788
2789         adv_instance->rpa_expired = true;
2790 }
2791
2792 /* This function requires the caller holds hdev->lock */
2793 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2794                          u16 adv_data_len, u8 *adv_data,
2795                          u16 scan_rsp_len, u8 *scan_rsp_data,
2796                          u16 timeout, u16 duration)
2797 {
2798         struct adv_info *adv_instance;
2799
2800         adv_instance = hci_find_adv_instance(hdev, instance);
2801         if (adv_instance) {
2802                 memset(adv_instance->adv_data, 0,
2803                        sizeof(adv_instance->adv_data));
2804                 memset(adv_instance->scan_rsp_data, 0,
2805                        sizeof(adv_instance->scan_rsp_data));
2806         } else {
2807                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2808                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2809                         return -EOVERFLOW;
2810
2811                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2812                 if (!adv_instance)
2813                         return -ENOMEM;
2814
2815                 adv_instance->pending = true;
2816                 adv_instance->instance = instance;
2817                 list_add(&adv_instance->list, &hdev->adv_instances);
2818                 hdev->adv_instance_cnt++;
2819         }
2820
2821         adv_instance->flags = flags;
2822         adv_instance->adv_data_len = adv_data_len;
2823         adv_instance->scan_rsp_len = scan_rsp_len;
2824
2825         if (adv_data_len)
2826                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2827
2828         if (scan_rsp_len)
2829                 memcpy(adv_instance->scan_rsp_data,
2830                        scan_rsp_data, scan_rsp_len);
2831
2832         adv_instance->timeout = timeout;
2833         adv_instance->remaining_time = timeout;
2834
2835         if (duration == 0)
2836                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2837         else
2838                 adv_instance->duration = duration;
2839
2840         adv_instance->tx_power = HCI_TX_POWER_INVALID;
2841
2842         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2843                           adv_instance_rpa_expired);
2844
2845         BT_DBG("%s for instance %d", hdev->name, instance);
2846
2847         return 0;
2848 }
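
/* A minimal caller sketch (hypothetical values): register instance 1
 * with a single Flags AD element (LE General Discoverable, BR/EDR not
 * supported), no scan response data, no timeout and the default
 * duration, with hdev->lock held as this function requires:
 *
 *	static const u8 ad[] = { 0x02, 0x01, 0x06 };
 *	int err;
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), (u8 *)ad,
 *				   0, NULL, 0, 0);
 *	hci_dev_unlock(hdev);
 */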
2849
2850 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2851                                          bdaddr_t *bdaddr, u8 type)
2852 {
2853         struct bdaddr_list *b;
2854
2855         list_for_each_entry(b, bdaddr_list, list) {
2856                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2857                         return b;
2858         }
2859
2860         return NULL;
2861 }
2862
2863 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2864 {
2865         struct bdaddr_list *b, *n;
2866
2867         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2868                 list_del(&b->list);
2869                 kfree(b);
2870         }
2871 }
2872
2873 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2874 {
2875         struct bdaddr_list *entry;
2876
2877         if (!bacmp(bdaddr, BDADDR_ANY))
2878                 return -EBADF;
2879
2880         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2881                 return -EEXIST;
2882
2883         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2884         if (!entry)
2885                 return -ENOMEM;
2886
2887         bacpy(&entry->bdaddr, bdaddr);
2888         entry->bdaddr_type = type;
2889
2890         list_add(&entry->list, list);
2891
2892         return 0;
2893 }
2894
2895 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2896 {
2897         struct bdaddr_list *entry;
2898
2899         if (!bacmp(bdaddr, BDADDR_ANY)) {
2900                 hci_bdaddr_list_clear(list);
2901                 return 0;
2902         }
2903
2904         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2905         if (!entry)
2906                 return -ENOENT;
2907
2908         list_del(&entry->list);
2909         kfree(entry);
2910
2911         return 0;
2912 }
2913
2914 /* This function requires the caller holds hdev->lock */
2915 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2916                                                bdaddr_t *addr, u8 addr_type)
2917 {
2918         struct hci_conn_params *params;
2919
2920         list_for_each_entry(params, &hdev->le_conn_params, list) {
2921                 if (bacmp(&params->addr, addr) == 0 &&
2922                     params->addr_type == addr_type) {
2923                         return params;
2924                 }
2925         }
2926
2927         return NULL;
2928 }
2929
2930 /* This function requires the caller holds hdev->lock */
2931 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2932                                                   bdaddr_t *addr, u8 addr_type)
2933 {
2934         struct hci_conn_params *param;
2935
2936         list_for_each_entry(param, list, action) {
2937                 if (bacmp(&param->addr, addr) == 0 &&
2938                     param->addr_type == addr_type)
2939                         return param;
2940         }
2941
2942         return NULL;
2943 }
2944
2945 /* This function requires the caller holds hdev->lock */
2946 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2947                                             bdaddr_t *addr, u8 addr_type)
2948 {
2949         struct hci_conn_params *params;
2950
2951         params = hci_conn_params_lookup(hdev, addr, addr_type);
2952         if (params)
2953                 return params;
2954
2955         params = kzalloc(sizeof(*params), GFP_KERNEL);
2956         if (!params) {
2957                 bt_dev_err(hdev, "out of memory");
2958                 return NULL;
2959         }
2960
2961         bacpy(&params->addr, addr);
2962         params->addr_type = addr_type;
2963
2964         list_add(&params->list, &hdev->le_conn_params);
2965         INIT_LIST_HEAD(&params->action);
2966
2967         params->conn_min_interval = hdev->le_conn_min_interval;
2968         params->conn_max_interval = hdev->le_conn_max_interval;
2969         params->conn_latency = hdev->le_conn_latency;
2970         params->supervision_timeout = hdev->le_supv_timeout;
2971         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2972
2973         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2974
2975         return params;
2976 }
2977
2978 static void hci_conn_params_free(struct hci_conn_params *params)
2979 {
2980         if (params->conn) {
2981                 hci_conn_drop(params->conn);
2982                 hci_conn_put(params->conn);
2983         }
2984
2985         list_del(&params->action);
2986         list_del(&params->list);
2987         kfree(params);
2988 }
2989
2990 /* This function requires the caller holds hdev->lock */
2991 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2992 {
2993         struct hci_conn_params *params;
2994
2995         params = hci_conn_params_lookup(hdev, addr, addr_type);
2996         if (!params)
2997                 return;
2998
2999         hci_conn_params_free(params);
3000
3001         hci_update_background_scan(hdev);
3002
3003         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3004 }
3005
3006 /* This function requires the caller holds hdev->lock */
3007 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3008 {
3009         struct hci_conn_params *params, *tmp;
3010
3011         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3012                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3013                         continue;
3014
3015                 /* If trying to establish a one-time connection to a disabled
3016                  * device, leave the params, but mark them as just once.
3017                  */
3018                 if (params->explicit_connect) {
3019                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3020                         continue;
3021                 }
3022
3023                 list_del(&params->list);
3024                 kfree(params);
3025         }
3026
3027         BT_DBG("All LE disabled connection parameters were removed");
3028 }
3029
3030 /* This function requires the caller holds hdev->lock */
3031 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3032 {
3033         struct hci_conn_params *params, *tmp;
3034
3035         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3036                 hci_conn_params_free(params);
3037
3038         BT_DBG("All LE connection parameters were removed");
3039 }
3040
3041 /* Copy the Identity Address of the controller.
3042  *
3043  * If the controller has a public BD_ADDR, then by default use that one.
3044  * If this is a LE only controller without a public address, default to
3045  * the static random address.
3046  *
3047  * For debugging purposes it is possible to force controllers with a
3048  * public address to use the static random address instead.
3049  *
3050  * In case BR/EDR has been disabled on a dual-mode controller and
3051  * userspace has configured a static address, then that address
3052  * becomes the identity address instead of the public BR/EDR address.
3053  */
3054 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3055                                u8 *bdaddr_type)
3056 {
3057         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3058             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3059             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3060              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3061                 bacpy(bdaddr, &hdev->static_addr);
3062                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3063         } else {
3064                 bacpy(bdaddr, &hdev->bdaddr);
3065                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3066         }
3067 }
3068
3069 /* Alloc HCI device */
3070 struct hci_dev *hci_alloc_dev(void)
3071 {
3072         struct hci_dev *hdev;
3073
3074         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3075         if (!hdev)
3076                 return NULL;
3077
3078         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3079         hdev->esco_type = (ESCO_HV1);
3080         hdev->link_mode = (HCI_LM_ACCEPT);
3081         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3082         hdev->io_capability = 0x03;     /* No Input No Output */
3083         hdev->manufacturer = 0xffff;    /* Default to internal use */
3084         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3085         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3086         hdev->adv_instance_cnt = 0;
3087         hdev->cur_adv_instance = 0x00;
3088         hdev->adv_instance_timeout = 0;
3089
3090         hdev->sniff_max_interval = 800;
3091         hdev->sniff_min_interval = 80;
3092
3093         hdev->le_adv_channel_map = 0x07;
3094         hdev->le_adv_min_interval = 0x0800;
3095         hdev->le_adv_max_interval = 0x0800;
3096         hdev->le_scan_interval = 0x0060;
3097         hdev->le_scan_window = 0x0030;
3098         hdev->le_conn_min_interval = 0x0018;
3099         hdev->le_conn_max_interval = 0x0028;
3100         hdev->le_conn_latency = 0x0000;
3101         hdev->le_supv_timeout = 0x002a;
3102         hdev->le_def_tx_len = 0x001b;
3103         hdev->le_def_tx_time = 0x0148;
3104         hdev->le_max_tx_len = 0x001b;
3105         hdev->le_max_tx_time = 0x0148;
3106         hdev->le_max_rx_len = 0x001b;
3107         hdev->le_max_rx_time = 0x0148;
3108         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3109         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3110         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3111         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3112
3113         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3114         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3115         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3116         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3117
3118         mutex_init(&hdev->lock);
3119         mutex_init(&hdev->req_lock);
3120
3121         INIT_LIST_HEAD(&hdev->mgmt_pending);
3122         INIT_LIST_HEAD(&hdev->blacklist);
3123         INIT_LIST_HEAD(&hdev->whitelist);
3124         INIT_LIST_HEAD(&hdev->uuids);
3125         INIT_LIST_HEAD(&hdev->link_keys);
3126         INIT_LIST_HEAD(&hdev->long_term_keys);
3127         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3128         INIT_LIST_HEAD(&hdev->remote_oob_data);
3129         INIT_LIST_HEAD(&hdev->le_white_list);
3130         INIT_LIST_HEAD(&hdev->le_resolv_list);
3131         INIT_LIST_HEAD(&hdev->le_conn_params);
3132         INIT_LIST_HEAD(&hdev->pend_le_conns);
3133         INIT_LIST_HEAD(&hdev->pend_le_reports);
3134         INIT_LIST_HEAD(&hdev->conn_hash.list);
3135         INIT_LIST_HEAD(&hdev->adv_instances);
3136
3137         INIT_WORK(&hdev->rx_work, hci_rx_work);
3138         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3139         INIT_WORK(&hdev->tx_work, hci_tx_work);
3140         INIT_WORK(&hdev->power_on, hci_power_on);
3141         INIT_WORK(&hdev->error_reset, hci_error_reset);
3142
3143         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3144
3145         skb_queue_head_init(&hdev->rx_q);
3146         skb_queue_head_init(&hdev->cmd_q);
3147         skb_queue_head_init(&hdev->raw_q);
3148
3149         init_waitqueue_head(&hdev->req_wait_q);
3150
3151         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3152
3153         hci_request_setup(hdev);
3154
3155         hci_init_sysfs(hdev);
3156         discovery_init(hdev);
3157
3158         return hdev;
3159 }
3160 EXPORT_SYMBOL(hci_alloc_dev);
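
/* Editorial note on the LE defaults set above, with the unit conversions
 * from the Bluetooth Core Specification worked out:
 *
 *	le_scan_interval     0x0060 =   96 * 0.625 ms =   60 ms
 *	le_scan_window       0x0030 =   48 * 0.625 ms =   30 ms
 *	le_conn_min_interval 0x0018 =   24 * 1.25 ms  =   30 ms
 *	le_conn_max_interval 0x0028 =   40 * 1.25 ms  =   50 ms
 *	le_supv_timeout      0x002a =   42 * 10 ms    =  420 ms
 *	le_adv_min_interval  0x0800 = 2048 * 0.625 ms = 1280 ms
 */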
3161
3162 /* Free HCI device */
3163 void hci_free_dev(struct hci_dev *hdev)
3164 {
3165         /* will free via device release */
3166         put_device(&hdev->dev);
3167 }
3168 EXPORT_SYMBOL(hci_free_dev);
3169
3170 /* Register HCI device */
3171 int hci_register_dev(struct hci_dev *hdev)
3172 {
3173         int id, error;
3174
3175         if (!hdev->open || !hdev->close || !hdev->send)
3176                 return -EINVAL;
3177
3178         /* Do not allow HCI_AMP devices to register at index 0,
3179          * so the index can be used as the AMP controller ID.
3180          */
3181         switch (hdev->dev_type) {
3182         case HCI_PRIMARY:
3183                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3184                 break;
3185         case HCI_AMP:
3186                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3187                 break;
3188         default:
3189                 return -EINVAL;
3190         }
3191
3192         if (id < 0)
3193                 return id;
3194
3195         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3196         hdev->id = id;
3197
3198         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3199
3200         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3201         if (!hdev->workqueue) {
3202                 error = -ENOMEM;
3203                 goto err;
3204         }
3205
3206         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3207                                                       hdev->name);
3208         if (!hdev->req_workqueue) {
3209                 destroy_workqueue(hdev->workqueue);
3210                 error = -ENOMEM;
3211                 goto err;
3212         }
3213
3214         if (!IS_ERR_OR_NULL(bt_debugfs))
3215                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3216
3217         dev_set_name(&hdev->dev, "%s", hdev->name);
3218
3219         error = device_add(&hdev->dev);
3220         if (error < 0)
3221                 goto err_wqueue;
3222
3223         hci_leds_init(hdev);
3224
3225         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3226                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3227                                     hdev);
3228         if (hdev->rfkill) {
3229                 if (rfkill_register(hdev->rfkill) < 0) {
3230                         rfkill_destroy(hdev->rfkill);
3231                         hdev->rfkill = NULL;
3232                 }
3233         }
3234
3235         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3236                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3237
3238         hci_dev_set_flag(hdev, HCI_SETUP);
3239         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3240
3241         if (hdev->dev_type == HCI_PRIMARY) {
3242                 /* Assume BR/EDR support until proven otherwise (such as
3243                  * through reading supported features during init).
3244                  */
3245                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3246         }
3247
3248         write_lock(&hci_dev_list_lock);
3249         list_add(&hdev->list, &hci_dev_list);
3250         write_unlock(&hci_dev_list_lock);
3251
3252         /* Devices that are marked for raw-only usage are unconfigured
3253          * and should not be included in normal operation.
3254          */
3255         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3256                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3257
3258         hci_sock_dev_event(hdev, HCI_DEV_REG);
3259         hci_dev_hold(hdev);
3260
3261         queue_work(hdev->req_workqueue, &hdev->power_on);
3262
3263         return id;
3264
3265 err_wqueue:
3266         debugfs_remove_recursive(hdev->debugfs);
3267         destroy_workqueue(hdev->workqueue);
3268         destroy_workqueue(hdev->req_workqueue);
3269 err:
3270         ida_simple_remove(&hci_index_ida, hdev->id);
3271
3272         return error;
3273 }
3274 EXPORT_SYMBOL(hci_register_dev);
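
/* Editorial sketch of the registration flow from a driver's point of
 * view (my_open/my_close/my_send are hypothetical driver callbacks and
 * HCI_USB is just one possible bus type):
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 *
 * On removal the driver calls hci_unregister_dev() and then
 * hci_free_dev(), mirroring this sequence.
 */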
3275
3276 /* Unregister HCI device */
3277 void hci_unregister_dev(struct hci_dev *hdev)
3278 {
3279         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3280
3281         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3282
3283         write_lock(&hci_dev_list_lock);
3284         list_del(&hdev->list);
3285         write_unlock(&hci_dev_list_lock);
3286
3287         cancel_work_sync(&hdev->power_on);
3288
3289         hci_dev_do_close(hdev);
3290
3291         if (!test_bit(HCI_INIT, &hdev->flags) &&
3292             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3293             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3294                 hci_dev_lock(hdev);
3295                 mgmt_index_removed(hdev);
3296                 hci_dev_unlock(hdev);
3297         }
3298
3299         /* mgmt_index_removed should take care of emptying the
3300          * pending list */
3301         BUG_ON(!list_empty(&hdev->mgmt_pending));
3302
3303         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3304
3305         if (hdev->rfkill) {
3306                 rfkill_unregister(hdev->rfkill);
3307                 rfkill_destroy(hdev->rfkill);
3308         }
3309
3310         device_del(&hdev->dev);
3311         /* Actual cleanup is deferred until hci_cleanup_dev(). */
3312         hci_dev_put(hdev);
3313 }
3314 EXPORT_SYMBOL(hci_unregister_dev);
3315
3316 /* Cleanup HCI device */
3317 void hci_cleanup_dev(struct hci_dev *hdev)
3318 {
3319         debugfs_remove_recursive(hdev->debugfs);
3320         kfree_const(hdev->hw_info);
3321         kfree_const(hdev->fw_info);
3322
3323         destroy_workqueue(hdev->workqueue);
3324         destroy_workqueue(hdev->req_workqueue);
3325
3326         hci_dev_lock(hdev);
3327         hci_bdaddr_list_clear(&hdev->blacklist);
3328         hci_bdaddr_list_clear(&hdev->whitelist);
3329         hci_uuids_clear(hdev);
3330         hci_link_keys_clear(hdev);
3331         hci_smp_ltks_clear(hdev);
3332         hci_smp_irks_clear(hdev);
3333         hci_remote_oob_data_clear(hdev);
3334         hci_adv_instances_clear(hdev);
3335         hci_bdaddr_list_clear(&hdev->le_white_list);
3336         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3337         hci_conn_params_clear_all(hdev);
3338         hci_discovery_filter_clear(hdev);
3339         hci_dev_unlock(hdev);
3340
3341         ida_simple_remove(&hci_index_ida, hdev->id);
3342 }
3343
3344 /* Suspend HCI device */
3345 int hci_suspend_dev(struct hci_dev *hdev)
3346 {
3347         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3348         return 0;
3349 }
3350 EXPORT_SYMBOL(hci_suspend_dev);
3351
3352 /* Resume HCI device */
3353 int hci_resume_dev(struct hci_dev *hdev)
3354 {
3355         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3356         return 0;
3357 }
3358 EXPORT_SYMBOL(hci_resume_dev);
3359
3360 /* Reset HCI device */
3361 int hci_reset_dev(struct hci_dev *hdev)
3362 {
3363         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3364         struct sk_buff *skb;
3365
3366         skb = bt_skb_alloc(3, GFP_ATOMIC);
3367         if (!skb)
3368                 return -ENOMEM;
3369
3370         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3371         skb_put_data(skb, hw_err, 3);
3372
3373         /* Send Hardware Error to upper stack */
3374         return hci_recv_frame(hdev, skb);
3375 }
3376 EXPORT_SYMBOL(hci_reset_dev);
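
/* Editorial note: the three bytes queued above form a complete HCI event
 * packet: event code HCI_EV_HARDWARE_ERROR, parameter length 0x01 and a
 * hardware code of 0x00. Injecting it through hci_recv_frame() makes the
 * stack treat a driver-requested reset exactly like a controller-reported
 * hardware error, which in turn schedules the error_reset work set up in
 * hci_alloc_dev().
 */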
3377
3378 /* Receive frame from HCI drivers */
3379 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3380 {
3381         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3382                       !test_bit(HCI_INIT, &hdev->flags))) {
3383                 kfree_skb(skb);
3384                 return -ENXIO;
3385         }
3386
3387         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3388             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3389             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3390                 kfree_skb(skb);
3391                 return -EINVAL;
3392         }
3393
3394         /* Incoming skb */
3395         bt_cb(skb)->incoming = 1;
3396
3397         /* Time stamp */
3398         __net_timestamp(skb);
3399
3400         skb_queue_tail(&hdev->rx_q, skb);
3401         queue_work(hdev->workqueue, &hdev->rx_work);
3402
3403         return 0;
3404 }
3405 EXPORT_SYMBOL(hci_recv_frame);
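
/* Editorial sketch (not part of this file): a driver's completion path
 * typically builds the skb like this before handing it over; buf and len
 * are hypothetical driver variables holding a received HCI event:
 *
 *	struct sk_buff *skb;
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *
 *	return hci_recv_frame(hdev, skb);
 */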
3406
3407 /* Receive diagnostic message from HCI drivers */
3408 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3409 {
3410         /* Mark as diagnostic packet */
3411         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3412
3413         /* Time stamp */
3414         __net_timestamp(skb);
3415
3416         skb_queue_tail(&hdev->rx_q, skb);
3417         queue_work(hdev->workqueue, &hdev->rx_work);
3418
3419         return 0;
3420 }
3421 EXPORT_SYMBOL(hci_recv_diag);
3422
3423 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3424 {
3425         va_list vargs;
3426
3427         va_start(vargs, fmt);
3428         kfree_const(hdev->hw_info);
3429         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3430         va_end(vargs);
3431 }
3432 EXPORT_SYMBOL(hci_set_hw_info);
3433
3434 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3435 {
3436         va_list vargs;
3437
3438         va_start(vargs, fmt);
3439         kfree_const(hdev->fw_info);
3440         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3441         va_end(vargs);
3442 }
3443 EXPORT_SYMBOL(hci_set_fw_info);
3444
3445 /* ---- Interface to upper protocols ---- */
3446
3447 int hci_register_cb(struct hci_cb *cb)
3448 {
3449         BT_DBG("%p name %s", cb, cb->name);
3450
3451         mutex_lock(&hci_cb_list_lock);
3452         list_add_tail(&cb->list, &hci_cb_list);
3453         mutex_unlock(&hci_cb_list_lock);
3454
3455         return 0;
3456 }
3457 EXPORT_SYMBOL(hci_register_cb);
3458
3459 int hci_unregister_cb(struct hci_cb *cb)
3460 {
3461         BT_DBG("%p name %s", cb, cb->name);
3462
3463         mutex_lock(&hci_cb_list_lock);
3464         list_del(&cb->list);
3465         mutex_unlock(&hci_cb_list_lock);
3466
3467         return 0;
3468 }
3469 EXPORT_SYMBOL(hci_unregister_cb);
3470
3471 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3472 {
3473         int err;
3474
3475         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3476                skb->len);
3477
3478         /* Time stamp */
3479         __net_timestamp(skb);
3480
3481         /* Send copy to monitor */
3482         hci_send_to_monitor(hdev, skb);
3483
3484         if (atomic_read(&hdev->promisc)) {
3485                 /* Send copy to the sockets */
3486                 hci_send_to_sock(hdev, skb);
3487         }
3488
3489         /* Get rid of skb owner, prior to sending to the driver. */
3490         skb_orphan(skb);
3491
3492         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3493                 kfree_skb(skb);
3494                 return;
3495         }
3496
3497         err = hdev->send(hdev, skb);
3498         if (err < 0) {
3499                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3500                 kfree_skb(skb);
3501         }
3502 }
3503
3504 /* Send HCI command */
3505 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3506                  const void *param)
3507 {
3508         struct sk_buff *skb;
3509
3510         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3511
3512         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3513         if (!skb) {
3514                 bt_dev_err(hdev, "no memory for command");
3515                 return -ENOMEM;
3516         }
3517
3518         /* Stand-alone HCI commands must be flagged as
3519          * single-command requests.
3520          */
3521         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3522
3523         skb_queue_tail(&hdev->cmd_q, skb);
3524         queue_work(hdev->workqueue, &hdev->cmd_work);
3525
3526         return 0;
3527 }
3528
3529 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3530                    const void *param)
3531 {
3532         struct sk_buff *skb;
3533
3534         if (hci_opcode_ogf(opcode) != 0x3f) {
3535                 /* A controller receiving a command shall respond with either
3536                  * a Command Status Event or a Command Complete Event.
3537                  * Therefore, all standard HCI commands must be sent via the
3538                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3539                  * Some vendors do not comply with this rule for vendor-specific
3540                  * commands and do not return any event. We want to support
3541                  * unresponded commands for such cases only.
3542                  */
3543                 bt_dev_err(hdev, "unresponded command not supported");
3544                 return -EINVAL;
3545         }
3546
3547         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3548         if (!skb) {
3549                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3550                            opcode);
3551                 return -ENOMEM;
3552         }
3553
3554         hci_send_frame(hdev, skb);
3555
3556         return 0;
3557 }
3558 EXPORT_SYMBOL(__hci_cmd_send);
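
/* Editorial sketch: per the check above, only vendor-specific commands
 * (OGF 0x3f) may be sent this way. A hypothetical vendor command with
 * OCF 0x0001 and a single parameter byte would be sent as:
 *
 *	u8 param = 0x01;
 *	int err;
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), &param);
 */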
3559
3560 /* Get data from the previously sent command */
3561 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3562 {
3563         struct hci_command_hdr *hdr;
3564
3565         if (!hdev->sent_cmd)
3566                 return NULL;
3567
3568         hdr = (void *) hdev->sent_cmd->data;
3569
3570         if (hdr->opcode != cpu_to_le16(opcode))
3571                 return NULL;
3572
3573         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3574
3575         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3576 }
3577
3578 /* Send HCI command and wait for Command Complete event */
3579 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3580                              const void *param, u32 timeout)
3581 {
3582         struct sk_buff *skb;
3583
3584         if (!test_bit(HCI_UP, &hdev->flags))
3585                 return ERR_PTR(-ENETDOWN);
3586
3587         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3588
3589         hci_req_sync_lock(hdev);
3590         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3591         hci_req_sync_unlock(hdev);
3592
3593         return skb;
3594 }
3595 EXPORT_SYMBOL(hci_cmd_sync);
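
/* Editorial sketch: reading the local version information synchronously.
 * The returned skb carries the Command Complete return parameters and
 * must be freed by the caller:
 *
 *	struct hci_rp_read_local_version *rp;
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_local_version *)skb->data;
 *	bt_dev_info(hdev, "HCI version %u", rp->hci_ver);
 *	kfree_skb(skb);
 */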
3596
3597 /* Send ACL data */
3598 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3599 {
3600         struct hci_acl_hdr *hdr;
3601         int len = skb->len;
3602
3603         skb_push(skb, HCI_ACL_HDR_SIZE);
3604         skb_reset_transport_header(skb);
3605         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3606         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3607         hdr->dlen   = cpu_to_le16(len);
3608 }
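
/* Editorial note: hci_handle_pack() folds the 12-bit connection handle
 * and the 4-bit packet boundary/broadcast flags into a single 16-bit
 * field, handle | (flags << 12). For example, handle 0x0042 combined
 * with ACL_START (0x02) packs to 0x2042; the receive path splits it
 * again with hci_handle() and hci_flags().
 */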
3609
3610 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3611                           struct sk_buff *skb, __u16 flags)
3612 {
3613         struct hci_conn *conn = chan->conn;
3614         struct hci_dev *hdev = conn->hdev;
3615         struct sk_buff *list;
3616
3617         skb->len = skb_headlen(skb);
3618         skb->data_len = 0;
3619
3620         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3621
3622         switch (hdev->dev_type) {
3623         case HCI_PRIMARY:
3624                 hci_add_acl_hdr(skb, conn->handle, flags);
3625                 break;
3626         case HCI_AMP:
3627                 hci_add_acl_hdr(skb, chan->handle, flags);
3628                 break;
3629         default:
3630                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3631                 return;
3632         }
3633
3634         list = skb_shinfo(skb)->frag_list;
3635         if (!list) {
3636                 /* Non fragmented */
3637                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3638
3639                 skb_queue_tail(queue, skb);
3640         } else {
3641                 /* Fragmented */
3642                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3643
3644                 skb_shinfo(skb)->frag_list = NULL;
3645
3646                 /* Queue all fragments atomically. We need to use spin_lock_bh
3647                  * here because of 6LoWPAN links, as there this function is
3648                  * called from softirq and using normal spin lock could cause
3649                  * deadlocks.
3650                  */
3651                 spin_lock_bh(&queue->lock);
3652
3653                 __skb_queue_tail(queue, skb);
3654
3655                 flags &= ~ACL_START;
3656                 flags |= ACL_CONT;
3657                 do {
3658                         skb = list; list = list->next;
3659
3660                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3661                         hci_add_acl_hdr(skb, conn->handle, flags);
3662
3663                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3664
3665                         __skb_queue_tail(queue, skb);
3666                 } while (list);
3667
3668                 spin_unlock_bh(&queue->lock);
3669         }
3670 }
3671
3672 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3673 {
3674         struct hci_dev *hdev = chan->conn->hdev;
3675
3676         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3677
3678         hci_queue_acl(chan, &chan->data_q, skb, flags);
3679
3680         queue_work(hdev->workqueue, &hdev->tx_work);
3681 }
3682
3683 /* Send SCO data */
3684 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3685 {
3686         struct hci_dev *hdev = conn->hdev;
3687         struct hci_sco_hdr hdr;
3688
3689         BT_DBG("%s len %d", hdev->name, skb->len);
3690
3691         hdr.handle = cpu_to_le16(conn->handle);
3692         hdr.dlen   = skb->len;
3693
3694         skb_push(skb, HCI_SCO_HDR_SIZE);
3695         skb_reset_transport_header(skb);
3696         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3697
3698         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3699
3700         skb_queue_tail(&conn->data_q, skb);
3701         queue_work(hdev->workqueue, &hdev->tx_work);
3702 }
3703
3704 /* ---- HCI TX task (outgoing data) ---- */
3705
3706 /* HCI Connection scheduler */
3707 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3708                                      int *quote)
3709 {
3710         struct hci_conn_hash *h = &hdev->conn_hash;
3711         struct hci_conn *conn = NULL, *c;
3712         unsigned int num = 0, min = ~0;
3713
3714         /* We don't have to lock the device here. Connections are
3715          * always added and removed with the TX task disabled. */
3716
3717         rcu_read_lock();
3718
3719         list_for_each_entry_rcu(c, &h->list, list) {
3720                 if (c->type != type || skb_queue_empty(&c->data_q))
3721                         continue;
3722
3723                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3724                         continue;
3725
3726                 num++;
3727
3728                 if (c->sent < min) {
3729                         min  = c->sent;
3730                         conn = c;
3731                 }
3732
3733                 if (hci_conn_num(hdev, type) == num)
3734                         break;
3735         }
3736
3737         rcu_read_unlock();
3738
3739         if (conn) {
3740                 int cnt, q;
3741
3742                 switch (conn->type) {
3743                 case ACL_LINK:
3744                         cnt = hdev->acl_cnt;
3745                         break;
3746                 case SCO_LINK:
3747                 case ESCO_LINK:
3748                         cnt = hdev->sco_cnt;
3749                         break;
3750                 case LE_LINK:
3751                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3752                         break;
3753                 default:
3754                         cnt = 0;
3755                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3756                 }
3757
3758                 q = cnt / num;
3759                 *quote = q ? q : 1;
3760         } else
3761                 *quote = 0;
3762
3763         BT_DBG("conn %p quote %d", conn, *quote);
3764         return conn;
3765 }
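
/* Editorial note: the quota computed above is a fair share of the free
 * controller buffers. With, say, hdev->acl_cnt == 8 free ACL buffers and
 * num == 3 eligible connections, each scheduling round grants
 * 8 / 3 = 2 packets, and the connection with the fewest packets in
 * flight (lowest c->sent) is served first. The "q ? q : 1" fallback
 * keeps the scheduler making progress even when cnt < num.
 */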
3766
3767 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3768 {
3769         struct hci_conn_hash *h = &hdev->conn_hash;
3770         struct hci_conn *c;
3771
3772         bt_dev_err(hdev, "link tx timeout");
3773
3774         rcu_read_lock();
3775
3776         /* Kill stalled connections */
3777         list_for_each_entry_rcu(c, &h->list, list) {
3778                 if (c->type == type && c->sent) {
3779                         bt_dev_err(hdev, "killing stalled connection %pMR",
3780                                    &c->dst);
3781                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3782                 }
3783         }
3784
3785         rcu_read_unlock();
3786 }
3787
3788 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3789                                       int *quote)
3790 {
3791         struct hci_conn_hash *h = &hdev->conn_hash;
3792         struct hci_chan *chan = NULL;
3793         unsigned int num = 0, min = ~0, cur_prio = 0;
3794         struct hci_conn *conn;
3795         int cnt, q, conn_num = 0;
3796
3797         BT_DBG("%s", hdev->name);
3798
3799         rcu_read_lock();
3800
3801         list_for_each_entry_rcu(conn, &h->list, list) {
3802                 struct hci_chan *tmp;
3803
3804                 if (conn->type != type)
3805                         continue;
3806
3807                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3808                         continue;
3809
3810                 conn_num++;
3811
3812                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3813                         struct sk_buff *skb;
3814
3815                         if (skb_queue_empty(&tmp->data_q))
3816                                 continue;
3817
3818                         skb = skb_peek(&tmp->data_q);
3819                         if (skb->priority < cur_prio)
3820                                 continue;
3821
3822                         if (skb->priority > cur_prio) {
3823                                 num = 0;
3824                                 min = ~0;
3825                                 cur_prio = skb->priority;
3826                         }
3827
3828                         num++;
3829
3830                         if (conn->sent < min) {
3831                                 min  = conn->sent;
3832                                 chan = tmp;
3833                         }
3834                 }
3835
3836                 if (hci_conn_num(hdev, type) == conn_num)
3837                         break;
3838         }
3839
3840         rcu_read_unlock();
3841
3842         if (!chan)
3843                 return NULL;
3844
3845         switch (chan->conn->type) {
3846         case ACL_LINK:
3847                 cnt = hdev->acl_cnt;
3848                 break;
3849         case AMP_LINK:
3850                 cnt = hdev->block_cnt;
3851                 break;
3852         case SCO_LINK:
3853         case ESCO_LINK:
3854                 cnt = hdev->sco_cnt;
3855                 break;
3856         case LE_LINK:
3857                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3858                 break;
3859         default:
3860                 cnt = 0;
3861                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3862         }
3863
3864         q = cnt / num;
3865         *quote = q ? q : 1;
3866         BT_DBG("chan %p quote %d", chan, *quote);
3867         return chan;
3868 }
3869
3870 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3871 {
3872         struct hci_conn_hash *h = &hdev->conn_hash;
3873         struct hci_conn *conn;
3874         int num = 0;
3875
3876         BT_DBG("%s", hdev->name);
3877
3878         rcu_read_lock();
3879
3880         list_for_each_entry_rcu(conn, &h->list, list) {
3881                 struct hci_chan *chan;
3882
3883                 if (conn->type != type)
3884                         continue;
3885
3886                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3887                         continue;
3888
3889                 num++;
3890
3891                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3892                         struct sk_buff *skb;
3893
3894                         if (chan->sent) {
3895                                 chan->sent = 0;
3896                                 continue;
3897                         }
3898
3899                         if (skb_queue_empty(&chan->data_q))
3900                                 continue;
3901
3902                         skb = skb_peek(&chan->data_q);
3903                         if (skb->priority >= HCI_PRIO_MAX - 1)
3904                                 continue;
3905
3906                         skb->priority = HCI_PRIO_MAX - 1;
3907
3908                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3909                                skb->priority);
3910                 }
3911
3912                 if (hci_conn_num(hdev, type) == num)
3913                         break;
3914         }
3915
3916         rcu_read_unlock();
3917
3918 }
3919
3920 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3921 {
3922         /* Calculate count of blocks used by this packet */
3923         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3924 }
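
/* Editorial note: block-based flow control accounts for payload blocks
 * rather than whole packets. With a hypothetical block_len of 64 bytes,
 * an ACL packet whose skb->len is HCI_ACL_HDR_SIZE + 150 occupies
 * DIV_ROUND_UP(150, 64) = 3 blocks.
 */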
3925
3926 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3927 {
3928         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3929                 /* ACL tx timeout must be longer than maximum
3930                  * link supervision timeout (40.9 seconds) */
3931                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3932                                        HCI_ACL_TX_TIMEOUT))
3933                         hci_link_tx_to(hdev, ACL_LINK);
3934         }
3935 }
3936
3937 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3938 {
3939         unsigned int cnt = hdev->acl_cnt;
3940         struct hci_chan *chan;
3941         struct sk_buff *skb;
3942         int quote;
3943
3944         __check_timeout(hdev, cnt);
3945
3946         while (hdev->acl_cnt &&
3947                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3948                 u32 priority = (skb_peek(&chan->data_q))->priority;
3949                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3950                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3951                                skb->len, skb->priority);
3952
3953                         /* Stop if priority has changed */
3954                         if (skb->priority < priority)
3955                                 break;
3956
3957                         skb = skb_dequeue(&chan->data_q);
3958
3959                         hci_conn_enter_active_mode(chan->conn,
3960                                                    bt_cb(skb)->force_active);
3961
3962                         hci_send_frame(hdev, skb);
3963                         hdev->acl_last_tx = jiffies;
3964
3965                         hdev->acl_cnt--;
3966                         chan->sent++;
3967                         chan->conn->sent++;
3968                 }
3969         }
3970
3971         if (cnt != hdev->acl_cnt)
3972                 hci_prio_recalculate(hdev, ACL_LINK);
3973 }
3974
3975 static void hci_sched_acl_blk(struct hci_dev *hdev)
3976 {
3977         unsigned int cnt = hdev->block_cnt;
3978         struct hci_chan *chan;
3979         struct sk_buff *skb;
3980         int quote;
3981         u8 type;
3982
3983         __check_timeout(hdev, cnt);
3984
3985         BT_DBG("%s", hdev->name);
3986
3987         if (hdev->dev_type == HCI_AMP)
3988                 type = AMP_LINK;
3989         else
3990                 type = ACL_LINK;
3991
3992         while (hdev->block_cnt > 0 &&
3993                (chan = hci_chan_sent(hdev, type, &quote))) {
3994                 u32 priority = (skb_peek(&chan->data_q))->priority;
3995                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3996                         int blocks;
3997
3998                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3999                                skb->len, skb->priority);
4000
4001                         /* Stop if priority has changed */
4002                         if (skb->priority < priority)
4003                                 break;
4004
4005                         skb = skb_dequeue(&chan->data_q);
4006
4007                         blocks = __get_blocks(hdev, skb);
4008                         if (blocks > hdev->block_cnt)
4009                                 return;
4010
4011                         hci_conn_enter_active_mode(chan->conn,
4012                                                    bt_cb(skb)->force_active);
4013
4014                         hci_send_frame(hdev, skb);
4015                         hdev->acl_last_tx = jiffies;
4016
4017                         hdev->block_cnt -= blocks;
4018                         quote -= blocks;
4019
4020                         chan->sent += blocks;
4021                         chan->conn->sent += blocks;
4022                 }
4023         }
4024
4025         if (cnt != hdev->block_cnt)
4026                 hci_prio_recalculate(hdev, type);
4027 }
4028
4029 static void hci_sched_acl(struct hci_dev *hdev)
4030 {
4031         BT_DBG("%s", hdev->name);
4032
4033         /* No ACL link over BR/EDR controller */
4034         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4035                 return;
4036
4037         /* No AMP link over AMP controller */
4038         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4039                 return;
4040
4041         switch (hdev->flow_ctl_mode) {
4042         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4043                 hci_sched_acl_pkt(hdev);
4044                 break;
4045
4046         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4047                 hci_sched_acl_blk(hdev);
4048                 break;
4049         }
4050 }
4051
4052 /* Schedule SCO */
4053 static void hci_sched_sco(struct hci_dev *hdev)
4054 {
4055         struct hci_conn *conn;
4056         struct sk_buff *skb;
4057         int quote;
4058
4059         BT_DBG("%s", hdev->name);
4060
4061         if (!hci_conn_num(hdev, SCO_LINK))
4062                 return;
4063
4064         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4065                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4066                         BT_DBG("skb %p len %d", skb, skb->len);
4067                         hci_send_frame(hdev, skb);
4068
4069                         conn->sent++;
4070                         if (conn->sent == ~0)
4071                                 conn->sent = 0;
4072                 }
4073         }
4074 }
4075
4076 static void hci_sched_esco(struct hci_dev *hdev)
4077 {
4078         struct hci_conn *conn;
4079         struct sk_buff *skb;
4080         int quote;
4081
4082         BT_DBG("%s", hdev->name);
4083
4084         if (!hci_conn_num(hdev, ESCO_LINK))
4085                 return;
4086
4087         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4088                                                      &quote))) {
4089                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4090                         BT_DBG("skb %p len %d", skb, skb->len);
4091                         hci_send_frame(hdev, skb);
4092
4093                         conn->sent++;
4094                         if (conn->sent == ~0)
4095                                 conn->sent = 0;
4096                 }
4097         }
4098 }
4099
4100 static void hci_sched_le(struct hci_dev *hdev)
4101 {
4102         struct hci_chan *chan;
4103         struct sk_buff *skb;
4104         int quote, cnt, tmp;
4105
4106         BT_DBG("%s", hdev->name);
4107
4108         if (!hci_conn_num(hdev, LE_LINK))
4109                 return;
4110
4111         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4112                 /* LE tx timeout must be longer than maximum
4113                  * link supervision timeout (40.9 seconds) */
4114                 if (!hdev->le_cnt && hdev->le_pkts &&
4115                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4116                         hci_link_tx_to(hdev, LE_LINK);
4117         }
4118
4119         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4120         tmp = cnt;
4121         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4122                 u32 priority = (skb_peek(&chan->data_q))->priority;
4123                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4124                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4125                                skb->len, skb->priority);
4126
4127                         /* Stop if priority has changed */
4128                         if (skb->priority < priority)
4129                                 break;
4130
4131                         skb = skb_dequeue(&chan->data_q);
4132
4133                         hci_send_frame(hdev, skb);
4134                         hdev->le_last_tx = jiffies;
4135
4136                         cnt--;
4137                         chan->sent++;
4138                         chan->conn->sent++;
4139                 }
4140         }
4141
4142         if (hdev->le_pkts)
4143                 hdev->le_cnt = cnt;
4144         else
4145                 hdev->acl_cnt = cnt;
4146
4147         if (cnt != tmp)
4148                 hci_prio_recalculate(hdev, LE_LINK);
4149 }
4150
4151 static void hci_tx_work(struct work_struct *work)
4152 {
4153         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4154         struct sk_buff *skb;
4155
4156         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4157                hdev->sco_cnt, hdev->le_cnt);
4158
4159         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4160                 /* Schedule queues and send stuff to HCI driver */
4161                 hci_sched_acl(hdev);
4162                 hci_sched_sco(hdev);
4163                 hci_sched_esco(hdev);
4164                 hci_sched_le(hdev);
4165         }
4166
4167         /* Send next queued raw (unknown type) packet */
4168         while ((skb = skb_dequeue(&hdev->raw_q)))
4169                 hci_send_frame(hdev, skb);
4170 }
4171
4172 /* ----- HCI RX task (incoming data processing) ----- */
4173
4174 /* ACL data packet */
4175 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4176 {
4177         struct hci_acl_hdr *hdr = (void *) skb->data;
4178         struct hci_conn *conn;
4179         __u16 handle, flags;
4180
4181         skb_pull(skb, HCI_ACL_HDR_SIZE);
4182
4183         handle = __le16_to_cpu(hdr->handle);
4184         flags  = hci_flags(handle);
4185         handle = hci_handle(handle);
4186
4187         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4188                handle, flags);
4189
4190         hdev->stat.acl_rx++;
4191
4192         hci_dev_lock(hdev);
4193         conn = hci_conn_hash_lookup_handle(hdev, handle);
4194         hci_dev_unlock(hdev);
4195
4196         if (conn) {
4197                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4198
4199                 /* Send to upper protocol */
4200                 l2cap_recv_acldata(conn, skb, flags);
4201                 return;
4202         } else {
4203                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4204                            handle);
4205         }
4206
4207         kfree_skb(skb);
4208 }
4209
4210 /* SCO data packet */
4211 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4212 {
4213         struct hci_sco_hdr *hdr = (void *) skb->data;
4214         struct hci_conn *conn;
4215         __u16 handle;
4216
4217         skb_pull(skb, HCI_SCO_HDR_SIZE);
4218
4219         handle = __le16_to_cpu(hdr->handle);
4220
4221         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4222
4223         hdev->stat.sco_rx++;
4224
4225         hci_dev_lock(hdev);
4226         conn = hci_conn_hash_lookup_handle(hdev, handle);
4227         hci_dev_unlock(hdev);
4228
4229         if (conn) {
4230                 /* Send to upper protocol */
4231                 sco_recv_scodata(conn, skb);
4232                 return;
4233         } else {
4234                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4235                            handle);
4236         }
4237
4238         kfree_skb(skb);
4239 }
4240
4241 static bool hci_req_is_complete(struct hci_dev *hdev)
4242 {
4243         struct sk_buff *skb;
4244
4245         skb = skb_peek(&hdev->cmd_q);
4246         if (!skb)
4247                 return true;
4248
4249         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4250 }
4251
4252 static void hci_resend_last(struct hci_dev *hdev)
4253 {
4254         struct hci_command_hdr *sent;
4255         struct sk_buff *skb;
4256         u16 opcode;
4257
4258         if (!hdev->sent_cmd)
4259                 return;
4260
4261         sent = (void *) hdev->sent_cmd->data;
4262         opcode = __le16_to_cpu(sent->opcode);
4263         if (opcode == HCI_OP_RESET)
4264                 return;
4265
4266         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4267         if (!skb)
4268                 return;
4269
4270         skb_queue_head(&hdev->cmd_q, skb);
4271         queue_work(hdev->workqueue, &hdev->cmd_work);
4272 }
4273
4274 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4275                           hci_req_complete_t *req_complete,
4276                           hci_req_complete_skb_t *req_complete_skb)
4277 {
4278         struct sk_buff *skb;
4279         unsigned long flags;
4280
4281         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4282
4283         /* If the completed command doesn't match the last one that was
4284          * sent we need to do special handling of it.
4285          */
4286         if (!hci_sent_cmd_data(hdev, opcode)) {
4287                 /* Some CSR based controllers generate a spontaneous
4288                  * reset complete event during init and any pending
4289                  * command will never be completed. In such a case we
4290                  * need to resend whatever was the last sent
4291                  * command.
4292                  */
4293                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4294                         hci_resend_last(hdev);
4295
4296                 return;
4297         }
4298
4299         /* If we reach this point this event matches the last command sent */
4300         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4301
4302         /* If the command succeeded and there's still more commands in
4303          * this request the request is not yet complete.
4304          */
4305         if (!status && !hci_req_is_complete(hdev))
4306                 return;
4307
4308         /* If this was the last command in a request the complete
4309          * callback would be found in hdev->sent_cmd instead of the
4310          * command queue (hdev->cmd_q).
4311          */
4312         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4313                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4314                 return;
4315         }
4316
4317         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4318                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4319                 return;
4320         }
4321
4322         /* Remove all pending commands belonging to this request */
4323         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4324         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4325                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4326                         __skb_queue_head(&hdev->cmd_q, skb);
4327                         break;
4328                 }
4329
4330                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4331                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4332                 else
4333                         *req_complete = bt_cb(skb)->hci.req_complete;
4334                 kfree_skb(skb);
4335         }
4336         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4337 }
4338
4339 static void hci_rx_work(struct work_struct *work)
4340 {
4341         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4342         struct sk_buff *skb;
4343
4344         BT_DBG("%s", hdev->name);
4345
4346         while ((skb = skb_dequeue(&hdev->rx_q))) {
4347                 /* Send copy to monitor */
4348                 hci_send_to_monitor(hdev, skb);
4349
4350                 if (atomic_read(&hdev->promisc)) {
4351                         /* Send copy to the sockets */
4352                         hci_send_to_sock(hdev, skb);
4353                 }
4354
4355                 /* If the device has been opened in HCI_USER_CHANNEL,
4356                  * userspace has exclusive access to the device.
4357                  * While the device is in HCI_INIT, we still need to
4358                  * process the data packets on behalf of the driver in
4359                  * order to complete its setup().
4360                  */
4361                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4362                     !test_bit(HCI_INIT, &hdev->flags)) {
4363                         kfree_skb(skb);
4364                         continue;
4365                 }
4366
4367                 if (test_bit(HCI_INIT, &hdev->flags)) {
4368                         /* Don't process data packets in this state. */
4369                         switch (hci_skb_pkt_type(skb)) {
4370                         case HCI_ACLDATA_PKT:
4371                         case HCI_SCODATA_PKT:
4372                                 kfree_skb(skb);
4373                                 continue;
4374                         }
4375                 }
4376
4377                 /* Process frame */
4378                 switch (hci_skb_pkt_type(skb)) {
4379                 case HCI_EVENT_PKT:
4380                         BT_DBG("%s Event packet", hdev->name);
4381                         hci_event_packet(hdev, skb);
4382                         break;
4383
4384                 case HCI_ACLDATA_PKT:
4385                         BT_DBG("%s ACL data packet", hdev->name);
4386                         hci_acldata_packet(hdev, skb);
4387                         break;
4388
4389                 case HCI_SCODATA_PKT:
4390                         BT_DBG("%s SCO data packet", hdev->name);
4391                         hci_scodata_packet(hdev, skb);
4392                         break;
4393
4394                 default:
4395                         kfree_skb(skb);
4396                         break;
4397                 }
4398         }
4399 }
4400
4401 static void hci_cmd_work(struct work_struct *work)
4402 {
4403         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4404         struct sk_buff *skb;
4405
4406         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4407                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4408
4409         /* Send queued commands */
4410         if (atomic_read(&hdev->cmd_cnt)) {
4411                 skb = skb_dequeue(&hdev->cmd_q);
4412                 if (!skb)
4413                         return;
4414
4415                 kfree_skb(hdev->sent_cmd);
4416
4417                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4418                 if (hdev->sent_cmd) {
4419                         if (hci_req_status_pend(hdev))
4420                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4421                         atomic_dec(&hdev->cmd_cnt);
4422                         hci_send_frame(hdev, skb);
4423                         if (test_bit(HCI_RESET, &hdev->flags))
4424                                 cancel_delayed_work(&hdev->cmd_timer);
4425                         else
4426                                 schedule_delayed_work(&hdev->cmd_timer,
4427                                                       HCI_CMD_TIMEOUT);
4428                 } else {
4429                         skb_queue_head(&hdev->cmd_q, skb);
4430                         queue_work(hdev->workqueue, &hdev->cmd_work);
4431                 }
4432         }
4433 }