/* net/bluetooth/hci_request.c (GNU Linux-libre 4.14.266-gnu1) */
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
23
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
33 #define HCI_REQ_DONE      0
34 #define HCI_REQ_PEND      1
35 #define HCI_REQ_CANCELED  2
36
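/* Lifecycle of hdev->req_status for synchronous requests: it is set to
 * HCI_REQ_PEND before the request is run, moves to HCI_REQ_DONE from
 * hci_req_sync_complete() or to HCI_REQ_CANCELED from
 * hci_req_sync_cancel(), and is reset to 0 once the waiter has picked
 * up the result.
 */
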
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

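/* Hand the accumulated commands over to the controller. The completion
 * callback is stored in the control block of the last queued skb, so it
 * runs only after the final command of the request has completed. The
 * whole batch is spliced onto hdev->cmd_q under the queue lock and the
 * command work is kicked to start transmission.
 */
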
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

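/* Send a single HCI command and sleep until the matching completion
 * event arrives or @timeout (in jiffies) expires. On success the
 * returned skb holds the event parameters and must be freed by the
 * caller with kfree_skb(); on failure an ERR_PTR() is returned instead.
 */
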
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

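/* A minimal usage sketch (hypothetical caller; HCI_OP_READ_LOCAL_VERSION
 * and HCI_CMD_TIMEOUT are the standard definitions from hci.h):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * On success the command complete parameters are in skb->data and the
 * buffer must be released with kfree_skb(skb).
 */
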
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect HCI_UP
         * against any races from hci_dev_do_close when the controller
         * gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}

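/* Request builders passed to hci_req_sync() only queue commands; the
 * waiting happens in __hci_req_sync(). A minimal sketch (hypothetical
 * builder; SCAN_PAGE and HCI_OP_WRITE_SCAN_ENABLE are the standard
 * definitions used elsewhere in this file):
 *
 *	static int enable_page_scan(struct hci_request *req,
 *				    unsigned long opt)
 *	{
 *		u8 scan = SCAN_PAGE;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, enable_page_scan, 0, HCI_CMD_TIMEOUT,
 *			   NULL);
 */
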
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

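/* Page scan interval and window are expressed in 0.625 ms slots, so
 * 0x0100 = 160 ms, 0x0800 = 1.28 s and 0x0012 = 11.25 ms. "Fast
 * connectable" trades extra power for lower connection latency by
 * scanning more often and using interlaced scanning.
 */
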
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

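/* The three helpers below each emit one EIR structure: a length octet
 * (kept in uuids_start[0] and grown as UUIDs are appended), a type
 * octet (EIR_UUIDxx_ALL, downgraded to EIR_UUIDxx_SOME when the list
 * is truncated for lack of space) and the UUID values themselves.
 */
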
#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

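/* Assemble the EIR payload: local name first, then TX power, Device ID
 * and finally the 16/32/128-bit UUID lists, each helper consuming part
 * of the remaining HCI_MAX_EIR_LENGTH buffer.
 */
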
static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the whitelist.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no-longer-valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does
         * not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using directed
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled), use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

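/* The advertising type is derived from the instance state: ADV_IND
 * when connectable, ADV_SCAN_IND when only scan response data is set,
 * and ADV_NONCONN_IND otherwise. Advertising is restarted if it was
 * already active so that new parameters take effect.
 */
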
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* If instance already has the flags set skip adding it once
         * again.
         */
        if (adv_instance && eir_get_data(adv_instance->adv_data,
                                         adv_instance->adv_data_len, EIR_FLAGS,
                                         NULL))
                goto skip_flags;

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

skip_flags:
        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

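/* Make @instance the current advertising instance and arm its expiry
 * timer. Returns -EPERM while the legacy HCI_ADVERTISING setting is
 * active, -EBUSY if another instance timeout is already pending and
 * -ENOENT if the instance does not exist.
 */
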
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

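/* Pick the own address type (and queue an address update when needed),
 * in order of preference: a resolvable private address when privacy is
 * in use, a freshly generated non-resolvable private address when
 * privacy is required but no RPA can be used, the configured static
 * address, and finally the public address.
 */
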
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without a resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

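/* Recompute the BR/EDR scan enable setting: page scan is kept on while
 * the device is connectable or a whitelisted device is disconnected
 * (so it can reconnect), and inquiry scan is added while discoverable.
 * The command is only queued when the computed value differs from the
 * current HCI_PSCAN/HCI_ISCAN state.
 */
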
1522 void __hci_req_update_scan(struct hci_request *req)
1523 {
1524         struct hci_dev *hdev = req->hdev;
1525         u8 scan;
1526
1527         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1528                 return;
1529
1530         if (!hdev_is_powered(hdev))
1531                 return;
1532
1533         if (mgmt_powering_down(hdev))
1534                 return;
1535
1536         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1537             disconnected_whitelist_entries(hdev))
1538                 scan = SCAN_PAGE;
1539         else
1540                 scan = SCAN_DISABLED;
1541
1542         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1543                 scan |= SCAN_INQUIRY;
1544
1545         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1546             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1547                 return;
1548
1549         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1550 }
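/* Editorial illustration, not built as part of this file: how the one
 * parameter byte for HCI_OP_WRITE_SCAN_ENABLE is composed. The bit
 * values match the Write Scan Enable command in the Bluetooth Core
 * Specification (0x01 inquiry scan, 0x02 page scan). The helper is a
 * simplified hypothetical example; the driver above additionally keeps
 * page scan enabled while whitelist entries are disconnected.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static uint8_t example_scan_byte(bool connectable, bool discoverable)
{
	uint8_t scan = 0x00;		/* SCAN_DISABLED */

	if (connectable)
		scan |= 0x02;		/* SCAN_PAGE: page scan enabled */
	if (discoverable)
		scan |= 0x01;		/* SCAN_INQUIRY: inquiry scan enabled */

	return scan;			/* 0x03 enables both scans */
}
#endif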
1551
1552 static int update_scan(struct hci_request *req, unsigned long opt)
1553 {
1554         hci_dev_lock(req->hdev);
1555         __hci_req_update_scan(req);
1556         hci_dev_unlock(req->hdev);
1557         return 0;
1558 }
1559
1560 static void scan_update_work(struct work_struct *work)
1561 {
1562         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1563
1564         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1565 }
1566
1567 static int connectable_update(struct hci_request *req, unsigned long opt)
1568 {
1569         struct hci_dev *hdev = req->hdev;
1570
1571         hci_dev_lock(hdev);
1572
1573         __hci_req_update_scan(req);
1574
1575         /* If BR/EDR is not enabled and we disable advertising as a
1576          * by-product of disabling connectable, we need to update the
1577          * advertising flags.
1578          */
1579         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1580                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1581
1582         /* Update the advertising parameters if necessary */
1583         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1584             !list_empty(&hdev->adv_instances))
1585                 __hci_req_enable_advertising(req);
1586
1587         __hci_update_background_scan(req);
1588
1589         hci_dev_unlock(hdev);
1590
1591         return 0;
1592 }
1593
1594 static void connectable_update_work(struct work_struct *work)
1595 {
1596         struct hci_dev *hdev = container_of(work, struct hci_dev,
1597                                             connectable_update);
1598         u8 status;
1599
1600         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1601         mgmt_set_connectable_complete(hdev, status);
1602 }
1603
1604 static u8 get_service_classes(struct hci_dev *hdev)
1605 {
1606         struct bt_uuid *uuid;
1607         u8 val = 0;
1608
1609         list_for_each_entry(uuid, &hdev->uuids, list)
1610                 val |= uuid->svc_hint;
1611
1612         return val;
1613 }
1614
1615 void __hci_req_update_class(struct hci_request *req)
1616 {
1617         struct hci_dev *hdev = req->hdev;
1618         u8 cod[3];
1619
1620         BT_DBG("%s", hdev->name);
1621
1622         if (!hdev_is_powered(hdev))
1623                 return;
1624
1625         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1626                 return;
1627
1628         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1629                 return;
1630
1631         cod[0] = hdev->minor_class;
1632         cod[1] = hdev->major_class;
1633         cod[2] = get_service_classes(hdev);
1634
1635         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1636                 cod[1] |= 0x20;
1637
1638         if (memcmp(cod, hdev->dev_class, 3) == 0)
1639                 return;
1640
1641         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1642 }
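/* Editorial illustration, not built as part of this file: the 3-byte
 * Class of Device buffer assembled above. Byte 0 holds the minor device
 * class, byte 1 the major device class, byte 2 the OR of the per-UUID
 * service hints; 0x20 in byte 1 is CoD bit 13, the Limited Discoverable
 * Mode flag from the Bluetooth assigned numbers. The helper name is
 * hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static void example_build_cod(uint8_t cod[3], uint8_t minor, uint8_t major,
			      uint8_t svc_hints, bool limited)
{
	cod[0] = minor;		/* minor device class */
	cod[1] = major;		/* major device class */
	cod[2] = svc_hints;	/* service classes */

	if (limited)
		cod[1] |= 0x20;	/* Limited Discoverable Mode (CoD bit 13) */
}
#endif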
1643
1644 static void write_iac(struct hci_request *req)
1645 {
1646         struct hci_dev *hdev = req->hdev;
1647         struct hci_cp_write_current_iac_lap cp;
1648
1649         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1650                 return;
1651
1652         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1653                 /* Limited discoverable mode */
1654                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1655                 cp.iac_lap[0] = 0x00;   /* LIAC */
1656                 cp.iac_lap[1] = 0x8b;
1657                 cp.iac_lap[2] = 0x9e;
1658                 cp.iac_lap[3] = 0x33;   /* GIAC */
1659                 cp.iac_lap[4] = 0x8b;
1660                 cp.iac_lap[5] = 0x9e;
1661         } else {
1662                 /* General discoverable mode */
1663                 cp.num_iac = 1;
1664                 cp.iac_lap[0] = 0x33;   /* GIAC */
1665                 cp.iac_lap[1] = 0x8b;
1666                 cp.iac_lap[2] = 0x9e;
1667         }
1668
1669         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1670                     (cp.num_iac * 3) + 1, &cp);
1671 }
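/* Editorial illustration, not built as part of this file: the byte
 * triplets written above are the little-endian encodings of the 24-bit
 * inquiry access code LAPs GIAC (0x9E8B33) and LIAC (0x9E8B00). The
 * helper name is hypothetical.
 */
#if 0
#include <stdint.h>

static void example_encode_lap(uint8_t lap[3], uint32_t iac)
{
	lap[0] = iac & 0xff;		/* 0x33 for GIAC, 0x00 for LIAC */
	lap[1] = (iac >> 8) & 0xff;	/* 0x8b */
	lap[2] = (iac >> 16) & 0xff;	/* 0x9e */
}
#endif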
1672
1673 static int discoverable_update(struct hci_request *req, unsigned long opt)
1674 {
1675         struct hci_dev *hdev = req->hdev;
1676
1677         hci_dev_lock(hdev);
1678
1679         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1680                 write_iac(req);
1681                 __hci_req_update_scan(req);
1682                 __hci_req_update_class(req);
1683         }
1684
1685         /* Advertising instances don't use the global discoverable setting, so
1686          * only update AD if advertising was enabled using Set Advertising.
1687          */
1688         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1689                 __hci_req_update_adv_data(req, 0x00);
1690
1691                 /* Discoverable mode affects the local advertising
1692                  * address in limited privacy mode.
1693                  */
1694                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1695                         __hci_req_enable_advertising(req);
1696         }
1697
1698         hci_dev_unlock(hdev);
1699
1700         return 0;
1701 }
1702
1703 static void discoverable_update_work(struct work_struct *work)
1704 {
1705         struct hci_dev *hdev = container_of(work, struct hci_dev,
1706                                             discoverable_update);
1707         u8 status;
1708
1709         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1710         mgmt_set_discoverable_complete(hdev, status);
1711 }
1712
1713 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1714                       u8 reason)
1715 {
1716         switch (conn->state) {
1717         case BT_CONNECTED:
1718         case BT_CONFIG:
1719                 if (conn->type == AMP_LINK) {
1720                         struct hci_cp_disconn_phy_link cp;
1721
1722                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1723                         cp.reason = reason;
1724                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1725                                     &cp);
1726                 } else {
1727                         struct hci_cp_disconnect dc;
1728
1729                         dc.handle = cpu_to_le16(conn->handle);
1730                         dc.reason = reason;
1731                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1732                 }
1733
1734                 conn->state = BT_DISCONN;
1735
1736                 break;
1737         case BT_CONNECT:
1738                 if (conn->type == LE_LINK) {
1739                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1740                                 break;
1741                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1742                                     0, NULL);
1743                 } else if (conn->type == ACL_LINK) {
1744                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1745                                 break;
1746                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1747                                     6, &conn->dst);
1748                 }
1749                 break;
1750         case BT_CONNECT2:
1751                 if (conn->type == ACL_LINK) {
1752                         struct hci_cp_reject_conn_req rej;
1753
1754                         bacpy(&rej.bdaddr, &conn->dst);
1755                         rej.reason = reason;
1756
1757                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1758                                     sizeof(rej), &rej);
1759                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1760                         struct hci_cp_reject_sync_conn_req rej;
1761
1762                         bacpy(&rej.bdaddr, &conn->dst);
1763
1764                         /* SCO rejection has its own limited set of
1765                          * allowed error values (0x0D-0x0F) which isn't
1766                          * compatible with most values passed to this
1767                          * function. To be safe, hard-code one of the
1768                          * values that's suitable for SCO.
1769                          */
1770                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1771
1772                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1773                                     sizeof(rej), &rej);
1774                 }
1775                 break;
1776         default:
1777                 conn->state = BT_CLOSED;
1778                 break;
1779         }
1780 }
1781
1782 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1783 {
1784         if (status)
1785                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1786 }
1787
1788 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1789 {
1790         struct hci_request req;
1791         int err;
1792
1793         hci_req_init(&req, conn->hdev);
1794
1795         __hci_abort_conn(&req, conn, reason);
1796
1797         err = hci_req_run(&req, abort_conn_complete);
1798         if (err && err != -ENODATA) {
1799                 BT_ERR("Failed to run HCI request: err %d", err);
1800                 return err;
1801         }
1802
1803         return 0;
1804 }
1805
1806 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1807 {
1808         hci_dev_lock(req->hdev);
1809         __hci_update_background_scan(req);
1810         hci_dev_unlock(req->hdev);
1811         return 0;
1812 }
1813
1814 static void bg_scan_update(struct work_struct *work)
1815 {
1816         struct hci_dev *hdev = container_of(work, struct hci_dev,
1817                                             bg_scan_update);
1818         struct hci_conn *conn;
1819         u8 status;
1820         int err;
1821
1822         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1823         if (!err)
1824                 return;
1825
1826         hci_dev_lock(hdev);
1827
1828         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1829         if (conn)
1830                 hci_le_conn_failed(conn, status);
1831
1832         hci_dev_unlock(hdev);
1833 }
1834
1835 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1836 {
1837         hci_req_add_le_scan_disable(req);
1838         return 0;
1839 }
1840
1841 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1842 {
1843         u8 length = opt;
1844         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1845         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1846         struct hci_cp_inquiry cp;
1847
1848         BT_DBG("%s", req->hdev->name);
1849
1850         hci_dev_lock(req->hdev);
1851         hci_inquiry_cache_flush(req->hdev);
1852         hci_dev_unlock(req->hdev);
1853
1854         memset(&cp, 0, sizeof(cp));
1855
1856         if (req->hdev->discovery.limited)
1857                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1858         else
1859                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1860
1861         cp.length = length;
1862
1863         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1864
1865         return 0;
1866 }
1867
1868 static void le_scan_disable_work(struct work_struct *work)
1869 {
1870         struct hci_dev *hdev = container_of(work, struct hci_dev,
1871                                             le_scan_disable.work);
1872         u8 status;
1873
1874         BT_DBG("%s", hdev->name);
1875
1876         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1877                 return;
1878
1879         cancel_delayed_work(&hdev->le_scan_restart);
1880
1881         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1882         if (status) {
1883                 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1884                 return;
1885         }
1886
1887         hdev->discovery.scan_start = 0;
1888
1889         /* If we were running an LE-only scan, change the discovery state.
1890          * If we were running both LE and BR/EDR inquiry simultaneously
1891          * and BR/EDR inquiry has already finished, stop discovery;
1892          * otherwise BR/EDR inquiry will stop discovery when it finishes.
1893          * If we are going to resolve a remote device name, do not change
1894          * the discovery state.
1895          */
1896
1897         if (hdev->discovery.type == DISCOV_TYPE_LE)
1898                 goto discov_stopped;
1899
1900         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1901                 return;
1902
1903         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1904                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1905                     hdev->discovery.state != DISCOVERY_RESOLVING)
1906                         goto discov_stopped;
1907
1908                 return;
1909         }
1910
1911         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1912                      HCI_CMD_TIMEOUT, &status);
1913         if (status) {
1914                 BT_ERR("Inquiry failed: status 0x%02x", status);
1915                 goto discov_stopped;
1916         }
1917
1918         return;
1919
1920 discov_stopped:
1921         hci_dev_lock(hdev);
1922         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1923         hci_dev_unlock(hdev);
1924 }
1925
1926 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1927 {
1928         struct hci_dev *hdev = req->hdev;
1929         struct hci_cp_le_set_scan_enable cp;
1930
1931         /* If the controller is not scanning, we are done. */
1932         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1933                 return 0;
1934
1935         hci_req_add_le_scan_disable(req);
1936
1937         memset(&cp, 0, sizeof(cp));
1938         cp.enable = LE_SCAN_ENABLE;
1939         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1940         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1941
1942         return 0;
1943 }
1944
1945 static void le_scan_restart_work(struct work_struct *work)
1946 {
1947         struct hci_dev *hdev = container_of(work, struct hci_dev,
1948                                             le_scan_restart.work);
1949         unsigned long timeout, duration, scan_start, now;
1950         u8 status;
1951
1952         BT_DBG("%s", hdev->name);
1953
1954         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1955         if (status) {
1956                 BT_ERR("Failed to restart LE scan: status 0x%02x", status);
1957                 return;
1958         }
1959
1960         hci_dev_lock(hdev);
1961
1962         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1963             !hdev->discovery.scan_start)
1964                 goto unlock;
1965
1966         /* When the scan was started, hdev->le_scan_disable was queued to
1967          * run 'duration' after scan_start. During scan restart this work
1968          * has been canceled, so we must queue it again with the proper
1969          * remaining timeout to make sure the scan does not run indefinitely.
1970          */
1971         duration = hdev->discovery.scan_duration;
1972         scan_start = hdev->discovery.scan_start;
1973         now = jiffies;
1974         if (now - scan_start <= duration) {
1975                 int elapsed;
1976
1977                 if (now >= scan_start)
1978                         elapsed = now - scan_start;
1979                 else
1980                         elapsed = ULONG_MAX - scan_start + now;
1981
1982                 timeout = duration - elapsed;
1983         } else {
1984                 timeout = 0;
1985         }
1986
1987         queue_delayed_work(hdev->req_workqueue,
1988                            &hdev->le_scan_disable, timeout);
1989
1990 unlock:
1991         hci_dev_unlock(hdev);
1992 }
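/* Editorial illustration, not built as part of this file: the remaining
 * timeout arithmetic used in le_scan_restart_work(), with the jiffies
 * counter modeled as a plain unsigned long so the wraparound handling
 * can be read in isolation. The function name is hypothetical and the
 * logic deliberately mirrors the code above.
 */
#if 0
#include <limits.h>

static unsigned long example_remaining(unsigned long now,
				       unsigned long scan_start,
				       unsigned long duration)
{
	unsigned long elapsed;

	/* Unsigned subtraction gives the elapsed distance even when the
	 * counter wrapped between scan_start and now.
	 */
	if (now - scan_start > duration)
		return 0;	/* the scan window is already over */

	if (now >= scan_start)
		elapsed = now - scan_start;
	else
		elapsed = ULONG_MAX - scan_start + now;

	return duration - elapsed;
}
#endif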
1993
1994 static void disable_advertising(struct hci_request *req)
1995 {
1996         u8 enable = 0x00;
1997
1998         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1999 }
2000
2001 static int active_scan(struct hci_request *req, unsigned long opt)
2002 {
2003         u16 interval = opt;
2004         struct hci_dev *hdev = req->hdev;
2005         struct hci_cp_le_set_scan_param param_cp;
2006         struct hci_cp_le_set_scan_enable enable_cp;
2007         u8 own_addr_type;
2008         int err;
2009
2010         BT_DBG("%s", hdev->name);
2011
2012         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2013                 hci_dev_lock(hdev);
2014
2015                 /* Don't let discovery abort an outgoing connection attempt
2016                  * that's using directed advertising.
2017                  */
2018                 if (hci_lookup_le_connect(hdev)) {
2019                         hci_dev_unlock(hdev);
2020                         return -EBUSY;
2021                 }
2022
2023                 cancel_adv_timeout(hdev);
2024                 hci_dev_unlock(hdev);
2025
2026                 disable_advertising(req);
2027         }
2028
2029         /* If the controller is scanning, the background scan is running.
2030          * Temporarily stop it so that the discovery scanning parameters
2031          * can be set.
2032          */
2033         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2034                 hci_req_add_le_scan_disable(req);
2035
2036         /* All active scans will be done with either a resolvable private
2037          * address (when privacy feature has been enabled) or non-resolvable
2038          * private address.
2039          */
2040         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2041                                         &own_addr_type);
2042         if (err < 0)
2043                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2044
2045         memset(&param_cp, 0, sizeof(param_cp));
2046         param_cp.type = LE_SCAN_ACTIVE;
2047         param_cp.interval = cpu_to_le16(interval);
2048         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2049         param_cp.own_address_type = own_addr_type;
2050
2051         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2052                     &param_cp);
2053
2054         memset(&enable_cp, 0, sizeof(enable_cp));
2055         enable_cp.enable = LE_SCAN_ENABLE;
2056         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2057
2058         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2059                     &enable_cp);
2060
2061         return 0;
2062 }
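/* Editorial illustration, not built as part of this file: the interval
 * and window values handed to HCI_OP_LE_SET_SCAN_PARAM are expressed in
 * units of 0.625 ms, with a valid range of 0x0004-0x4000 (LE Set Scan
 * Parameters, Bluetooth Core Specification). The conversion helper is a
 * hypothetical example written from the specification, not driver code.
 */
#if 0
#include <stdint.h>

static uint16_t example_ms_to_scan_units(unsigned int ms)
{
	uint32_t units = (ms * 1000) / 625;	/* 1 unit == 0.625 ms */

	if (units < 0x0004)
		units = 0x0004;
	if (units > 0x4000)
		units = 0x4000;

	return (uint16_t)units;
}
#endif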
2063
2064 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2065 {
2066         int err;
2067
2068         BT_DBG("%s", req->hdev->name);
2069
2070         err = active_scan(req, opt);
2071         if (err)
2072                 return err;
2073
2074         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2075 }
2076
2077 static void start_discovery(struct hci_dev *hdev, u8 *status)
2078 {
2079         unsigned long timeout;
2080
2081         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2082
2083         switch (hdev->discovery.type) {
2084         case DISCOV_TYPE_BREDR:
2085                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2086                         hci_req_sync(hdev, bredr_inquiry,
2087                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2088                                      status);
2089                 return;
2090         case DISCOV_TYPE_INTERLEAVED:
2091                 /* When running simultaneous discovery, the LE scanning time
2092                  * should occupy the whole discovery time since BR/EDR inquiry
2093                  * and LE scanning are scheduled by the controller.
2094                  *
2095                  * For interleaved discovery, in comparison, BR/EDR inquiry
2096                  * and LE scanning are done sequentially with separate
2097                  * timeouts.
2098                  */
2099                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2100                              &hdev->quirks)) {
2101                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2102                         /* During simultaneous discovery, we double the LE scan
2103                          * interval. We must leave some time for the controller
2104                          * to do BR/EDR inquiry.
2105                          */
2106                         hci_req_sync(hdev, interleaved_discov,
2107                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2108                                      status);
2109                         break;
2110                 }
2111
2112                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2113                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2114                              HCI_CMD_TIMEOUT, status);
2115                 break;
2116         case DISCOV_TYPE_LE:
2117                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2118                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2119                              HCI_CMD_TIMEOUT, status);
2120                 break;
2121         default:
2122                 *status = HCI_ERROR_UNSPECIFIED;
2123                 return;
2124         }
2125
2126         if (*status)
2127                 return;
2128
2129         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2130
2131         /* When service discovery is used and the controller has a
2132          * strict duplicate filter, it is important to remember the
2133          * start and duration of the scan. This is required for
2134          * restarting scanning during the discovery phase.
2135          */
2136         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2137                      hdev->discovery.result_filtering) {
2138                 hdev->discovery.scan_start = jiffies;
2139                 hdev->discovery.scan_duration = timeout;
2140         }
2141
2142         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2143                            timeout);
2144 }
2145
2146 bool hci_req_stop_discovery(struct hci_request *req)
2147 {
2148         struct hci_dev *hdev = req->hdev;
2149         struct discovery_state *d = &hdev->discovery;
2150         struct hci_cp_remote_name_req_cancel cp;
2151         struct inquiry_entry *e;
2152         bool ret = false;
2153
2154         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2155
2156         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2157                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2158                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2159
2160                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2161                         cancel_delayed_work(&hdev->le_scan_disable);
2162                         hci_req_add_le_scan_disable(req);
2163                 }
2164
2165                 ret = true;
2166         } else {
2167                 /* Passive scanning */
2168                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2169                         hci_req_add_le_scan_disable(req);
2170                         ret = true;
2171                 }
2172         }
2173
2174         /* No further actions needed for LE-only discovery */
2175         if (d->type == DISCOV_TYPE_LE)
2176                 return ret;
2177
2178         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2179                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2180                                                      NAME_PENDING);
2181                 if (!e)
2182                         return ret;
2183
2184                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2185                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2186                             &cp);
2187                 ret = true;
2188         }
2189
2190         return ret;
2191 }
2192
2193 static int stop_discovery(struct hci_request *req, unsigned long opt)
2194 {
2195         hci_dev_lock(req->hdev);
2196         hci_req_stop_discovery(req);
2197         hci_dev_unlock(req->hdev);
2198
2199         return 0;
2200 }
2201
2202 static void discov_update(struct work_struct *work)
2203 {
2204         struct hci_dev *hdev = container_of(work, struct hci_dev,
2205                                             discov_update);
2206         u8 status = 0;
2207
2208         switch (hdev->discovery.state) {
2209         case DISCOVERY_STARTING:
2210                 start_discovery(hdev, &status);
2211                 mgmt_start_discovery_complete(hdev, status);
2212                 if (status)
2213                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2214                 else
2215                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2216                 break;
2217         case DISCOVERY_STOPPING:
2218                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2219                 mgmt_stop_discovery_complete(hdev, status);
2220                 if (!status)
2221                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2222                 break;
2223         case DISCOVERY_STOPPED:
2224         default:
2225                 return;
2226         }
2227 }
2228
2229 static void discov_off(struct work_struct *work)
2230 {
2231         struct hci_dev *hdev = container_of(work, struct hci_dev,
2232                                             discov_off.work);
2233
2234         BT_DBG("%s", hdev->name);
2235
2236         hci_dev_lock(hdev);
2237
2238         /* When the discoverable timeout triggers, just make sure
2239          * the limited discoverable flag is cleared. Even in the case
2240          * of a timeout triggered from general discoverable, it is
2241          * safe to unconditionally clear the flag.
2242          */
2243         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2244         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2245         hdev->discov_timeout = 0;
2246
2247         hci_dev_unlock(hdev);
2248
2249         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2250         mgmt_new_settings(hdev);
2251 }
2252
2253 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2254 {
2255         struct hci_dev *hdev = req->hdev;
2256         u8 link_sec;
2257
2258         hci_dev_lock(hdev);
2259
2260         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2261             !lmp_host_ssp_capable(hdev)) {
2262                 u8 mode = 0x01;
2263
2264                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2265
2266                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2267                         u8 support = 0x01;
2268
2269                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2270                                     sizeof(support), &support);
2271                 }
2272         }
2273
2274         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2275             lmp_bredr_capable(hdev)) {
2276                 struct hci_cp_write_le_host_supported cp;
2277
2278                 cp.le = 0x01;
2279                 cp.simul = 0x00;
2280
2281                 /* Check first if we already have the right
2282                  * host state (host features set)
2283                  */
2284                 if (cp.le != lmp_host_le_capable(hdev) ||
2285                     cp.simul != lmp_host_le_br_capable(hdev))
2286                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2287                                     sizeof(cp), &cp);
2288         }
2289
2290         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2291                 /* Make sure the controller has a good default for
2292                  * advertising data. This also applies to the case
2293                  * where BR/EDR was toggled during the AUTO_OFF phase.
2294                  */
2295                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2296                     list_empty(&hdev->adv_instances)) {
2297                         __hci_req_update_adv_data(req, 0x00);
2298                         __hci_req_update_scan_rsp_data(req, 0x00);
2299
2300                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2301                                 __hci_req_enable_advertising(req);
2302                 } else if (!list_empty(&hdev->adv_instances)) {
2303                         struct adv_info *adv_instance;
2304
2305                         adv_instance = list_first_entry(&hdev->adv_instances,
2306                                                         struct adv_info, list);
2307                         __hci_req_schedule_adv_instance(req,
2308                                                         adv_instance->instance,
2309                                                         true);
2310                 }
2311         }
2312
2313         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2314         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2315                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2316                             sizeof(link_sec), &link_sec);
2317
2318         if (lmp_bredr_capable(hdev)) {
2319                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2320                         __hci_req_write_fast_connectable(req, true);
2321                 else
2322                         __hci_req_write_fast_connectable(req, false);
2323                 __hci_req_update_scan(req);
2324                 __hci_req_update_class(req);
2325                 __hci_req_update_name(req);
2326                 __hci_req_update_eir(req);
2327         }
2328
2329         hci_dev_unlock(hdev);
2330         return 0;
2331 }
2332
2333 int __hci_req_hci_power_on(struct hci_dev *hdev)
2334 {
2335         /* Register the available SMP channels (BR/EDR and LE) only when
2336          * successfully powering on the controller. This late
2337          * registration is required so that LE SMP can clearly decide
2338          * whether the public address or the static address is used.
2339          */
2340         smp_register(hdev);
2341
2342         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2343                               NULL);
2344 }
2345
2346 void hci_request_setup(struct hci_dev *hdev)
2347 {
2348         INIT_WORK(&hdev->discov_update, discov_update);
2349         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2350         INIT_WORK(&hdev->scan_update, scan_update_work);
2351         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2352         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2353         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2354         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2355         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2356         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2357 }
2358
2359 void hci_request_cancel_all(struct hci_dev *hdev)
2360 {
2361         hci_req_sync_cancel(hdev, ENODEV);
2362
2363         cancel_work_sync(&hdev->discov_update);
2364         cancel_work_sync(&hdev->bg_scan_update);
2365         cancel_work_sync(&hdev->scan_update);
2366         cancel_work_sync(&hdev->connectable_update);
2367         cancel_work_sync(&hdev->discoverable_update);
2368         cancel_delayed_work_sync(&hdev->discov_off);
2369         cancel_delayed_work_sync(&hdev->le_scan_disable);
2370         cancel_delayed_work_sync(&hdev->le_scan_restart);
2371
2372         if (hdev->adv_instance_timeout) {
2373                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2374                 hdev->adv_instance_timeout = 0;
2375         }
2376 }