/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 skb_queue_head_init(&req->cmd_q);
44 static int req_run(struct hci_request *req, hci_req_complete_t complete,
45 hci_req_complete_skb_t complete_skb)
47 struct hci_dev *hdev = req->hdev;
51 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
53 /* If an error occurred during request building, remove all HCI
54 * commands queued on the HCI request queue.
57 skb_queue_purge(&req->cmd_q);
61 /* Do not allow empty requests */
62 if (skb_queue_empty(&req->cmd_q))
65 skb = skb_peek_tail(&req->cmd_q);
67 bt_cb(skb)->hci.req_complete = complete;
68 } else if (complete_skb) {
69 bt_cb(skb)->hci.req_complete_skb = complete_skb;
70 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
73 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
74 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
75 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
77 queue_work(hdev->workqueue, &hdev->cmd_work);
82 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
84 return req_run(req, complete, NULL);
87 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
89 return req_run(req, NULL, complete);
92 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
95 BT_DBG("%s result 0x%2.2x", hdev->name, result);
97 if (hdev->req_status == HCI_REQ_PEND) {
98 hdev->req_result = result;
99 hdev->req_status = HCI_REQ_DONE;
101 hdev->req_skb = skb_get(skb);
102 wake_up_interruptible(&hdev->req_wait_q);
106 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
108 BT_DBG("%s err 0x%2.2x", hdev->name, err);
110 if (hdev->req_status == HCI_REQ_PEND) {
111 hdev->req_result = err;
112 hdev->req_status = HCI_REQ_CANCELED;
113 wake_up_interruptible(&hdev->req_wait_q);
117 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
118 const void *param, u8 event, u32 timeout)
120 DECLARE_WAITQUEUE(wait, current);
121 struct hci_request req;
125 BT_DBG("%s", hdev->name);
127 hci_req_init(&req, hdev);
129 hci_req_add_ev(&req, opcode, plen, param, event);
131 hdev->req_status = HCI_REQ_PEND;
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
136 err = hci_req_run_skb(&req, hci_req_sync_complete);
138 remove_wait_queue(&hdev->req_wait_q, &wait);
139 set_current_state(TASK_RUNNING);
143 schedule_timeout(timeout);
145 remove_wait_queue(&hdev->req_wait_q, &wait);
147 if (signal_pending(current))
148 return ERR_PTR(-EINTR);
150 switch (hdev->req_status) {
152 err = -bt_to_errno(hdev->req_result);
155 case HCI_REQ_CANCELED:
156 err = -hdev->req_result;
164 hdev->req_status = hdev->req_result = 0;
166 hdev->req_skb = NULL;
168 BT_DBG("%s end: err %d", hdev->name, err);
176 return ERR_PTR(-ENODATA);
180 EXPORT_SYMBOL(__hci_cmd_sync_ev);
182 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
183 const void *param, u32 timeout)
185 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
187 EXPORT_SYMBOL(__hci_cmd_sync);
189 /* Execute request and wait for completion. */
190 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
192 unsigned long opt, u32 timeout, u8 *hci_status)
194 struct hci_request req;
195 DECLARE_WAITQUEUE(wait, current);
198 BT_DBG("%s start", hdev->name);
200 hci_req_init(&req, hdev);
202 hdev->req_status = HCI_REQ_PEND;
204 err = func(&req, opt);
207 *hci_status = HCI_ERROR_UNSPECIFIED;
211 add_wait_queue(&hdev->req_wait_q, &wait);
212 set_current_state(TASK_INTERRUPTIBLE);
214 err = hci_req_run_skb(&req, hci_req_sync_complete);
216 hdev->req_status = 0;
218 remove_wait_queue(&hdev->req_wait_q, &wait);
219 set_current_state(TASK_RUNNING);
221 /* ENODATA means the HCI request command queue is empty.
222 * This can happen when a request with conditionals doesn't
223 * trigger any commands to be sent. This is normal behavior
224 * and should not trigger an error return.
226 if (err == -ENODATA) {
233 *hci_status = HCI_ERROR_UNSPECIFIED;
238 schedule_timeout(timeout);
240 remove_wait_queue(&hdev->req_wait_q, &wait);
242 if (signal_pending(current))
245 switch (hdev->req_status) {
247 err = -bt_to_errno(hdev->req_result);
249 *hci_status = hdev->req_result;
252 case HCI_REQ_CANCELED:
253 err = -hdev->req_result;
255 *hci_status = HCI_ERROR_UNSPECIFIED;
261 *hci_status = HCI_ERROR_UNSPECIFIED;
265 kfree_skb(hdev->req_skb);
266 hdev->req_skb = NULL;
267 hdev->req_status = hdev->req_result = 0;
269 BT_DBG("%s end: err %d", hdev->name, err);
274 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
276 unsigned long opt, u32 timeout, u8 *hci_status)
280 /* Serialize all requests */
281 hci_req_sync_lock(hdev);
282 /* check the state after obtaing the lock to protect the HCI_UP
283 * against any races from hci_dev_do_close when the controller
286 if (test_bit(HCI_UP, &hdev->flags))
287 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
290 hci_req_sync_unlock(hdev);
295 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
298 int len = HCI_COMMAND_HDR_SIZE + plen;
299 struct hci_command_hdr *hdr;
302 skb = bt_skb_alloc(len, GFP_ATOMIC);
306 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
307 hdr->opcode = cpu_to_le16(opcode);
311 skb_put_data(skb, param, plen);
313 BT_DBG("skb len %d", skb->len);
315 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
316 hci_skb_opcode(skb) = opcode;
321 /* Queue a command to an asynchronous HCI request */
322 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
323 const void *param, u8 event)
325 struct hci_dev *hdev = req->hdev;
328 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
330 /* If an error occurred during request building, there is no point in
331 * queueing the HCI command. We can simply return.
336 skb = hci_prepare_cmd(hdev, opcode, plen, param);
338 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
344 if (skb_queue_empty(&req->cmd_q))
345 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
347 bt_cb(skb)->hci.req_event = event;
349 skb_queue_tail(&req->cmd_q, skb);
352 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
355 hci_req_add_ev(req, opcode, plen, param, 0);
358 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
360 struct hci_dev *hdev = req->hdev;
361 struct hci_cp_write_page_scan_activity acp;
364 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
367 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
371 type = PAGE_SCAN_TYPE_INTERLACED;
373 /* 160 msec page scan interval */
374 acp.interval = cpu_to_le16(0x0100);
376 type = PAGE_SCAN_TYPE_STANDARD; /* default */
378 /* default 1.28 sec page scan */
379 acp.interval = cpu_to_le16(0x0800);
382 acp.window = cpu_to_le16(0x0012);
384 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
385 __cpu_to_le16(hdev->page_scan_window) != acp.window)
386 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
389 if (hdev->page_scan_type != type)
390 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
393 /* This function controls the background scanning based on hdev->pend_le_conns
394 * list. If there are pending LE connection we start the background scanning,
395 * otherwise we stop it.
397 * This function requires the caller holds hdev->lock.
399 static void __hci_update_background_scan(struct hci_request *req)
401 struct hci_dev *hdev = req->hdev;
403 if (!test_bit(HCI_UP, &hdev->flags) ||
404 test_bit(HCI_INIT, &hdev->flags) ||
405 hci_dev_test_flag(hdev, HCI_SETUP) ||
406 hci_dev_test_flag(hdev, HCI_CONFIG) ||
407 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
408 hci_dev_test_flag(hdev, HCI_UNREGISTER))
411 /* No point in doing scanning if LE support hasn't been enabled */
412 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
415 /* If discovery is active don't interfere with it */
416 if (hdev->discovery.state != DISCOVERY_STOPPED)
419 /* Reset RSSI and UUID filters when starting background scanning
420 * since these filters are meant for service discovery only.
422 * The Start Discovery and Start Service Discovery operations
423 * ensure to set proper values for RSSI threshold and UUID
424 * filter list. So it is safe to just reset them here.
426 hci_discovery_filter_clear(hdev);
428 if (list_empty(&hdev->pend_le_conns) &&
429 list_empty(&hdev->pend_le_reports)) {
430 /* If there is no pending LE connections or devices
431 * to be scanned for, we should stop the background
435 /* If controller is not scanning we are done. */
436 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
439 hci_req_add_le_scan_disable(req);
441 BT_DBG("%s stopping background scanning", hdev->name);
443 /* If there is at least one pending LE connection, we should
444 * keep the background scan running.
447 /* If controller is connecting, we should not start scanning
448 * since some controllers are not able to scan and connect at
451 if (hci_lookup_le_connect(hdev))
454 /* If controller is currently scanning, we stop it to ensure we
455 * don't miss any advertising (due to duplicates filter).
457 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
458 hci_req_add_le_scan_disable(req);
460 hci_req_add_le_passive_scan(req);
462 BT_DBG("%s starting background scanning", hdev->name);
466 void __hci_req_update_name(struct hci_request *req)
468 struct hci_dev *hdev = req->hdev;
469 struct hci_cp_write_local_name cp;
471 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
473 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
476 #define PNP_INFO_SVCLASS_ID 0x1200
478 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
480 u8 *ptr = data, *uuids_start = NULL;
481 struct bt_uuid *uuid;
486 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 16)
492 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
496 if (uuid16 == PNP_INFO_SVCLASS_ID)
502 uuids_start[1] = EIR_UUID16_ALL;
506 /* Stop if not enough space to put next UUID */
507 if ((ptr - data) + sizeof(u16) > len) {
508 uuids_start[1] = EIR_UUID16_SOME;
512 *ptr++ = (uuid16 & 0x00ff);
513 *ptr++ = (uuid16 & 0xff00) >> 8;
514 uuids_start[0] += sizeof(uuid16);
520 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
522 u8 *ptr = data, *uuids_start = NULL;
523 struct bt_uuid *uuid;
528 list_for_each_entry(uuid, &hdev->uuids, list) {
529 if (uuid->size != 32)
535 uuids_start[1] = EIR_UUID32_ALL;
539 /* Stop if not enough space to put next UUID */
540 if ((ptr - data) + sizeof(u32) > len) {
541 uuids_start[1] = EIR_UUID32_SOME;
545 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
547 uuids_start[0] += sizeof(u32);
553 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
555 u8 *ptr = data, *uuids_start = NULL;
556 struct bt_uuid *uuid;
561 list_for_each_entry(uuid, &hdev->uuids, list) {
562 if (uuid->size != 128)
568 uuids_start[1] = EIR_UUID128_ALL;
572 /* Stop if not enough space to put next UUID */
573 if ((ptr - data) + 16 > len) {
574 uuids_start[1] = EIR_UUID128_SOME;
578 memcpy(ptr, uuid->uuid, 16);
580 uuids_start[0] += 16;
586 static void create_eir(struct hci_dev *hdev, u8 *data)
591 name_len = strlen(hdev->dev_name);
597 ptr[1] = EIR_NAME_SHORT;
599 ptr[1] = EIR_NAME_COMPLETE;
601 /* EIR Data length */
602 ptr[0] = name_len + 1;
604 memcpy(ptr + 2, hdev->dev_name, name_len);
606 ptr += (name_len + 2);
609 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
611 ptr[1] = EIR_TX_POWER;
612 ptr[2] = (u8) hdev->inq_tx_power;
617 if (hdev->devid_source > 0) {
619 ptr[1] = EIR_DEVICE_ID;
621 put_unaligned_le16(hdev->devid_source, ptr + 2);
622 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
623 put_unaligned_le16(hdev->devid_product, ptr + 6);
624 put_unaligned_le16(hdev->devid_version, ptr + 8);
629 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
630 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
631 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
634 void __hci_req_update_eir(struct hci_request *req)
636 struct hci_dev *hdev = req->hdev;
637 struct hci_cp_write_eir cp;
639 if (!hdev_is_powered(hdev))
642 if (!lmp_ext_inq_capable(hdev))
645 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
648 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
651 memset(&cp, 0, sizeof(cp));
653 create_eir(hdev, cp.data);
655 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
658 memcpy(hdev->eir, cp.data, sizeof(cp.data));
660 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
663 void hci_req_add_le_scan_disable(struct hci_request *req)
665 struct hci_cp_le_set_scan_enable cp;
667 memset(&cp, 0, sizeof(cp));
668 cp.enable = LE_SCAN_DISABLE;
669 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
672 static void add_to_white_list(struct hci_request *req,
673 struct hci_conn_params *params)
675 struct hci_cp_le_add_to_white_list cp;
677 cp.bdaddr_type = params->addr_type;
678 bacpy(&cp.bdaddr, ¶ms->addr);
680 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
683 static u8 update_white_list(struct hci_request *req)
685 struct hci_dev *hdev = req->hdev;
686 struct hci_conn_params *params;
687 struct bdaddr_list *b;
688 uint8_t white_list_entries = 0;
690 /* Go through the current white list programmed into the
691 * controller one by one and check if that address is still
692 * in the list of pending connections or list of devices to
693 * report. If not present in either list, then queue the
694 * command to remove it from the controller.
696 list_for_each_entry(b, &hdev->le_white_list, list) {
697 /* If the device is neither in pend_le_conns nor
698 * pend_le_reports then remove it from the whitelist.
700 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
701 &b->bdaddr, b->bdaddr_type) &&
702 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
703 &b->bdaddr, b->bdaddr_type)) {
704 struct hci_cp_le_del_from_white_list cp;
706 cp.bdaddr_type = b->bdaddr_type;
707 bacpy(&cp.bdaddr, &b->bdaddr);
709 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
714 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
715 /* White list can not be used with RPAs */
719 white_list_entries++;
722 /* Since all no longer valid white list entries have been
723 * removed, walk through the list of pending connections
724 * and ensure that any new device gets programmed into
727 * If the list of the devices is larger than the list of
728 * available white list entries in the controller, then
729 * just abort and return filer policy value to not use the
732 list_for_each_entry(params, &hdev->pend_le_conns, action) {
733 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
734 ¶ms->addr, params->addr_type))
737 if (white_list_entries >= hdev->le_white_list_size) {
738 /* Select filter policy to accept all advertising */
742 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
743 params->addr_type)) {
744 /* White list can not be used with RPAs */
748 white_list_entries++;
749 add_to_white_list(req, params);
752 /* After adding all new pending connections, walk through
753 * the list of pending reports and also add these to the
754 * white list if there is still space.
756 list_for_each_entry(params, &hdev->pend_le_reports, action) {
757 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
758 ¶ms->addr, params->addr_type))
761 if (white_list_entries >= hdev->le_white_list_size) {
762 /* Select filter policy to accept all advertising */
766 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
767 params->addr_type)) {
768 /* White list can not be used with RPAs */
772 white_list_entries++;
773 add_to_white_list(req, params);
776 /* Select filter policy to use white list */
780 static bool scan_use_rpa(struct hci_dev *hdev)
782 return hci_dev_test_flag(hdev, HCI_PRIVACY);
785 void hci_req_add_le_passive_scan(struct hci_request *req)
787 struct hci_cp_le_set_scan_param param_cp;
788 struct hci_cp_le_set_scan_enable enable_cp;
789 struct hci_dev *hdev = req->hdev;
793 /* Set require_privacy to false since no SCAN_REQ are send
794 * during passive scanning. Not using an non-resolvable address
795 * here is important so that peer devices using direct
796 * advertising with our address will be correctly reported
799 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
803 /* Adding or removing entries from the white list must
804 * happen before enabling scanning. The controller does
805 * not allow white list modification while scanning.
807 filter_policy = update_white_list(req);
809 /* When the controller is using random resolvable addresses and
810 * with that having LE privacy enabled, then controllers with
811 * Extended Scanner Filter Policies support can now enable support
812 * for handling directed advertising.
814 * So instead of using filter polices 0x00 (no whitelist)
815 * and 0x01 (whitelist enabled) use the new filter policies
816 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
818 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
819 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
820 filter_policy |= 0x02;
822 memset(¶m_cp, 0, sizeof(param_cp));
823 param_cp.type = LE_SCAN_PASSIVE;
824 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
825 param_cp.window = cpu_to_le16(hdev->le_scan_window);
826 param_cp.own_address_type = own_addr_type;
827 param_cp.filter_policy = filter_policy;
828 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
831 memset(&enable_cp, 0, sizeof(enable_cp));
832 enable_cp.enable = LE_SCAN_ENABLE;
833 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
834 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
838 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
840 u8 instance = hdev->cur_adv_instance;
841 struct adv_info *adv_instance;
843 /* Ignore instance 0 */
844 if (instance == 0x00)
847 adv_instance = hci_find_adv_instance(hdev, instance);
851 /* TODO: Take into account the "appearance" and "local-name" flags here.
852 * These are currently being ignored as they are not supported.
854 return adv_instance->scan_rsp_len;
857 void __hci_req_disable_advertising(struct hci_request *req)
861 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
864 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
867 struct adv_info *adv_instance;
869 if (instance == 0x00) {
870 /* Instance 0 always manages the "Tx Power" and "Flags"
873 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
875 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
876 * corresponds to the "connectable" instance flag.
878 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
879 flags |= MGMT_ADV_FLAG_CONNECTABLE;
881 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
882 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
883 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
884 flags |= MGMT_ADV_FLAG_DISCOV;
889 adv_instance = hci_find_adv_instance(hdev, instance);
891 /* Return 0 when we got an invalid instance identifier. */
895 return adv_instance->flags;
898 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
900 /* If privacy is not enabled don't use RPA */
901 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
904 /* If basic privacy mode is enabled use RPA */
905 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
908 /* If limited privacy mode is enabled don't use RPA if we're
909 * both discoverable and bondable.
911 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
912 hci_dev_test_flag(hdev, HCI_BONDABLE))
915 /* We're neither bondable nor discoverable in the limited
916 * privacy mode, therefore use RPA.
921 void __hci_req_enable_advertising(struct hci_request *req)
923 struct hci_dev *hdev = req->hdev;
924 struct hci_cp_le_set_adv_param cp;
925 u8 own_addr_type, enable = 0x01;
929 if (hci_conn_num(hdev, LE_LINK) > 0)
932 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
933 __hci_req_disable_advertising(req);
935 /* Clear the HCI_LE_ADV bit temporarily so that the
936 * hci_update_random_address knows that it's safe to go ahead
937 * and write a new random address. The flag will be set back on
938 * as soon as the SET_ADV_ENABLE HCI command completes.
940 hci_dev_clear_flag(hdev, HCI_LE_ADV);
942 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
944 /* If the "connectable" instance flag was not set, then choose between
945 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
947 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
948 mgmt_get_connectable(hdev);
950 /* Set require_privacy to true only when non-connectable
951 * advertising is used. In that case it is fine to use a
952 * non-resolvable private address.
954 if (hci_update_random_address(req, !connectable,
955 adv_use_rpa(hdev, flags),
959 memset(&cp, 0, sizeof(cp));
960 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
961 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
964 cp.type = LE_ADV_IND;
965 else if (get_cur_adv_instance_scan_rsp_len(hdev))
966 cp.type = LE_ADV_SCAN_IND;
968 cp.type = LE_ADV_NONCONN_IND;
970 cp.own_address_type = own_addr_type;
971 cp.channel_map = hdev->le_adv_channel_map;
973 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
975 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
978 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
983 /* no space left for name (+ NULL + type + len) */
984 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
987 /* use complete name if present and fits */
988 complete_len = strlen(hdev->dev_name);
989 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
990 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
991 hdev->dev_name, complete_len + 1);
993 /* use short name if present */
994 short_len = strlen(hdev->short_name);
996 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
997 hdev->short_name, short_len + 1);
999 /* use shortened full name if present, we already know that name
1000 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1003 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1005 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1006 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1008 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1015 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1017 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1020 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1022 u8 scan_rsp_len = 0;
1024 if (hdev->appearance) {
1025 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1028 return append_local_name(hdev, ptr, scan_rsp_len);
1031 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1034 struct adv_info *adv_instance;
1036 u8 scan_rsp_len = 0;
1038 adv_instance = hci_find_adv_instance(hdev, instance);
1042 instance_flags = adv_instance->flags;
1044 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1045 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1048 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1049 adv_instance->scan_rsp_len);
1051 scan_rsp_len += adv_instance->scan_rsp_len;
1053 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1054 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1056 return scan_rsp_len;
1059 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1061 struct hci_dev *hdev = req->hdev;
1062 struct hci_cp_le_set_scan_rsp_data cp;
1065 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1068 memset(&cp, 0, sizeof(cp));
1071 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
1073 len = create_default_scan_rsp_data(hdev, cp.data);
1075 if (hdev->scan_rsp_data_len == len &&
1076 !memcmp(cp.data, hdev->scan_rsp_data, len))
1079 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1080 hdev->scan_rsp_data_len = len;
1084 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1087 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1089 struct adv_info *adv_instance = NULL;
1090 u8 ad_len = 0, flags = 0;
1093 /* Return 0 when the current instance identifier is invalid. */
1095 adv_instance = hci_find_adv_instance(hdev, instance);
1100 instance_flags = get_adv_instance_flags(hdev, instance);
1102 /* If instance already has the flags set skip adding it once
1105 if (adv_instance && eir_get_data(adv_instance->adv_data,
1106 adv_instance->adv_data_len, EIR_FLAGS,
1110 /* The Add Advertising command allows userspace to set both the general
1111 * and limited discoverable flags.
1113 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1114 flags |= LE_AD_GENERAL;
1116 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1117 flags |= LE_AD_LIMITED;
1119 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1120 flags |= LE_AD_NO_BREDR;
1122 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1123 /* If a discovery flag wasn't provided, simply use the global
1127 flags |= mgmt_get_adv_discov_flags(hdev);
1129 /* If flags would still be empty, then there is no need to
1130 * include the "Flags" AD field".
1144 memcpy(ptr, adv_instance->adv_data,
1145 adv_instance->adv_data_len);
1146 ad_len += adv_instance->adv_data_len;
1147 ptr += adv_instance->adv_data_len;
1150 /* Provide Tx Power only if we can provide a valid value for it */
1151 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1152 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1154 ptr[1] = EIR_TX_POWER;
1155 ptr[2] = (u8)hdev->adv_tx_power;
1164 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1166 struct hci_dev *hdev = req->hdev;
1167 struct hci_cp_le_set_adv_data cp;
1170 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1173 memset(&cp, 0, sizeof(cp));
1175 len = create_instance_adv_data(hdev, instance, cp.data);
1177 /* There's nothing to do if the data hasn't changed */
1178 if (hdev->adv_data_len == len &&
1179 memcmp(cp.data, hdev->adv_data, len) == 0)
1182 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1183 hdev->adv_data_len = len;
1187 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1190 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1192 struct hci_request req;
1194 hci_req_init(&req, hdev);
1195 __hci_req_update_adv_data(&req, instance);
1197 return hci_req_run(&req, NULL);
1200 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1202 BT_DBG("%s status %u", hdev->name, status);
1205 void hci_req_reenable_advertising(struct hci_dev *hdev)
1207 struct hci_request req;
1209 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1210 list_empty(&hdev->adv_instances))
1213 hci_req_init(&req, hdev);
1215 if (hdev->cur_adv_instance) {
1216 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1219 __hci_req_update_adv_data(&req, 0x00);
1220 __hci_req_update_scan_rsp_data(&req, 0x00);
1221 __hci_req_enable_advertising(&req);
1224 hci_req_run(&req, adv_enable_complete);
1227 static void adv_timeout_expire(struct work_struct *work)
1229 struct hci_dev *hdev = container_of(work, struct hci_dev,
1230 adv_instance_expire.work);
1232 struct hci_request req;
1235 BT_DBG("%s", hdev->name);
1239 hdev->adv_instance_timeout = 0;
1241 instance = hdev->cur_adv_instance;
1242 if (instance == 0x00)
1245 hci_req_init(&req, hdev);
1247 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1249 if (list_empty(&hdev->adv_instances))
1250 __hci_req_disable_advertising(&req);
1252 hci_req_run(&req, NULL);
1255 hci_dev_unlock(hdev);
1258 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1261 struct hci_dev *hdev = req->hdev;
1262 struct adv_info *adv_instance = NULL;
1265 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1266 list_empty(&hdev->adv_instances))
1269 if (hdev->adv_instance_timeout)
1272 adv_instance = hci_find_adv_instance(hdev, instance);
1276 /* A zero timeout means unlimited advertising. As long as there is
1277 * only one instance, duration should be ignored. We still set a timeout
1278 * in case further instances are being added later on.
1280 * If the remaining lifetime of the instance is more than the duration
1281 * then the timeout corresponds to the duration, otherwise it will be
1282 * reduced to the remaining instance lifetime.
1284 if (adv_instance->timeout == 0 ||
1285 adv_instance->duration <= adv_instance->remaining_time)
1286 timeout = adv_instance->duration;
1288 timeout = adv_instance->remaining_time;
1290 /* The remaining time is being reduced unless the instance is being
1291 * advertised without time limit.
1293 if (adv_instance->timeout)
1294 adv_instance->remaining_time =
1295 adv_instance->remaining_time - timeout;
1297 hdev->adv_instance_timeout = timeout;
1298 queue_delayed_work(hdev->req_workqueue,
1299 &hdev->adv_instance_expire,
1300 msecs_to_jiffies(timeout * 1000));
1302 /* If we're just re-scheduling the same instance again then do not
1303 * execute any HCI commands. This happens when a single instance is
1306 if (!force && hdev->cur_adv_instance == instance &&
1307 hci_dev_test_flag(hdev, HCI_LE_ADV))
1310 hdev->cur_adv_instance = instance;
1311 __hci_req_update_adv_data(req, instance);
1312 __hci_req_update_scan_rsp_data(req, instance);
1313 __hci_req_enable_advertising(req);
1318 static void cancel_adv_timeout(struct hci_dev *hdev)
1320 if (hdev->adv_instance_timeout) {
1321 hdev->adv_instance_timeout = 0;
1322 cancel_delayed_work(&hdev->adv_instance_expire);
1326 /* For a single instance:
1327 * - force == true: The instance will be removed even when its remaining
1328 * lifetime is not zero.
1329 * - force == false: the instance will be deactivated but kept stored unless
1330 * the remaining lifetime is zero.
1332 * For instance == 0x00:
1333 * - force == true: All instances will be removed regardless of their timeout
1335 * - force == false: Only instances that have a timeout will be removed.
1337 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1338 struct hci_request *req, u8 instance,
1341 struct adv_info *adv_instance, *n, *next_instance = NULL;
1345 /* Cancel any timeout concerning the removed instance(s). */
1346 if (!instance || hdev->cur_adv_instance == instance)
1347 cancel_adv_timeout(hdev);
1349 /* Get the next instance to advertise BEFORE we remove
1350 * the current one. This can be the same instance again
1351 * if there is only one instance.
1353 if (instance && hdev->cur_adv_instance == instance)
1354 next_instance = hci_get_next_instance(hdev, instance);
1356 if (instance == 0x00) {
1357 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1359 if (!(force || adv_instance->timeout))
1362 rem_inst = adv_instance->instance;
1363 err = hci_remove_adv_instance(hdev, rem_inst);
1365 mgmt_advertising_removed(sk, hdev, rem_inst);
1368 adv_instance = hci_find_adv_instance(hdev, instance);
1370 if (force || (adv_instance && adv_instance->timeout &&
1371 !adv_instance->remaining_time)) {
1372 /* Don't advertise a removed instance. */
1373 if (next_instance &&
1374 next_instance->instance == instance)
1375 next_instance = NULL;
1377 err = hci_remove_adv_instance(hdev, instance);
1379 mgmt_advertising_removed(sk, hdev, instance);
1383 if (!req || !hdev_is_powered(hdev) ||
1384 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1388 __hci_req_schedule_adv_instance(req, next_instance->instance,
1392 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1394 struct hci_dev *hdev = req->hdev;
1396 /* If we're advertising or initiating an LE connection we can't
1397 * go ahead and change the random address at this time. This is
1398 * because the eventual initiator address used for the
1399 * subsequently created connection will be undefined (some
1400 * controllers use the new address and others the one we had
1401 * when the operation started).
1403 * In this kind of scenario skip the update and let the random
1404 * address be updated at the next cycle.
1406 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1407 hci_lookup_le_connect(hdev)) {
1408 BT_DBG("Deferring random address update");
1409 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1413 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1416 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1417 bool use_rpa, u8 *own_addr_type)
1419 struct hci_dev *hdev = req->hdev;
1422 /* If privacy is enabled use a resolvable private address. If
1423 * current RPA has expired or there is something else than
1424 * the current RPA in use, then generate a new one.
1429 *own_addr_type = ADDR_LE_DEV_RANDOM;
1431 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1432 !bacmp(&hdev->random_addr, &hdev->rpa))
1435 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1437 BT_ERR("%s failed to generate new RPA", hdev->name);
1441 set_random_addr(req, &hdev->rpa);
1443 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1444 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1449 /* In case of required privacy without resolvable private address,
1450 * use an non-resolvable private address. This is useful for active
1451 * scanning and non-connectable advertising.
1453 if (require_privacy) {
1457 /* The non-resolvable private address is generated
1458 * from random six bytes with the two most significant
1461 get_random_bytes(&nrpa, 6);
1464 /* The non-resolvable private address shall not be
1465 * equal to the public address.
1467 if (bacmp(&hdev->bdaddr, &nrpa))
1471 *own_addr_type = ADDR_LE_DEV_RANDOM;
1472 set_random_addr(req, &nrpa);
1476 /* If forcing static address is in use or there is no public
1477 * address use the static address as random address (but skip
1478 * the HCI command if the current random address is already the
1481 * In case BR/EDR has been disabled on a dual-mode controller
1482 * and a static address has been configured, then use that
1483 * address instead of the public BR/EDR address.
1485 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1486 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1487 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1488 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1489 *own_addr_type = ADDR_LE_DEV_RANDOM;
1490 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1491 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1492 &hdev->static_addr);
1496 /* Neither privacy nor static address is being used so use a
1499 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1504 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1506 struct bdaddr_list *b;
1508 list_for_each_entry(b, &hdev->whitelist, list) {
1509 struct hci_conn *conn;
1511 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1515 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1522 void __hci_req_update_scan(struct hci_request *req)
1524 struct hci_dev *hdev = req->hdev;
1527 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1530 if (!hdev_is_powered(hdev))
1533 if (mgmt_powering_down(hdev))
1536 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1537 disconnected_whitelist_entries(hdev))
1540 scan = SCAN_DISABLED;
1542 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1543 scan |= SCAN_INQUIRY;
1545 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1546 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1549 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1552 static int update_scan(struct hci_request *req, unsigned long opt)
1554 hci_dev_lock(req->hdev);
1555 __hci_req_update_scan(req);
1556 hci_dev_unlock(req->hdev);
1560 static void scan_update_work(struct work_struct *work)
1562 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1564 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1567 static int connectable_update(struct hci_request *req, unsigned long opt)
1569 struct hci_dev *hdev = req->hdev;
1573 __hci_req_update_scan(req);
1575 /* If BR/EDR is not enabled and we disable advertising as a
1576 * by-product of disabling connectable, we need to update the
1577 * advertising flags.
1579 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1580 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1582 /* Update the advertising parameters if necessary */
1583 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1584 !list_empty(&hdev->adv_instances))
1585 __hci_req_enable_advertising(req);
1587 __hci_update_background_scan(req);
1589 hci_dev_unlock(hdev);
1594 static void connectable_update_work(struct work_struct *work)
1596 struct hci_dev *hdev = container_of(work, struct hci_dev,
1597 connectable_update);
1600 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1601 mgmt_set_connectable_complete(hdev, status);
1604 static u8 get_service_classes(struct hci_dev *hdev)
1606 struct bt_uuid *uuid;
1609 list_for_each_entry(uuid, &hdev->uuids, list)
1610 val |= uuid->svc_hint;
1615 void __hci_req_update_class(struct hci_request *req)
1617 struct hci_dev *hdev = req->hdev;
1620 BT_DBG("%s", hdev->name);
1622 if (!hdev_is_powered(hdev))
1625 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1628 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1631 cod[0] = hdev->minor_class;
1632 cod[1] = hdev->major_class;
1633 cod[2] = get_service_classes(hdev);
1635 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1638 if (memcmp(cod, hdev->dev_class, 3) == 0)
1641 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1644 static void write_iac(struct hci_request *req)
1646 struct hci_dev *hdev = req->hdev;
1647 struct hci_cp_write_current_iac_lap cp;
1649 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1652 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1653 /* Limited discoverable mode */
1654 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1655 cp.iac_lap[0] = 0x00; /* LIAC */
1656 cp.iac_lap[1] = 0x8b;
1657 cp.iac_lap[2] = 0x9e;
1658 cp.iac_lap[3] = 0x33; /* GIAC */
1659 cp.iac_lap[4] = 0x8b;
1660 cp.iac_lap[5] = 0x9e;
1662 /* General discoverable mode */
1664 cp.iac_lap[0] = 0x33; /* GIAC */
1665 cp.iac_lap[1] = 0x8b;
1666 cp.iac_lap[2] = 0x9e;
1669 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1670 (cp.num_iac * 3) + 1, &cp);
1673 static int discoverable_update(struct hci_request *req, unsigned long opt)
1675 struct hci_dev *hdev = req->hdev;
1679 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1681 __hci_req_update_scan(req);
1682 __hci_req_update_class(req);
1685 /* Advertising instances don't use the global discoverable setting, so
1686 * only update AD if advertising was enabled using Set Advertising.
1688 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1689 __hci_req_update_adv_data(req, 0x00);
1691 /* Discoverable mode affects the local advertising
1692 * address in limited privacy mode.
1694 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1695 __hci_req_enable_advertising(req);
1698 hci_dev_unlock(hdev);
1703 static void discoverable_update_work(struct work_struct *work)
1705 struct hci_dev *hdev = container_of(work, struct hci_dev,
1706 discoverable_update);
1709 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1710 mgmt_set_discoverable_complete(hdev, status);
1713 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1716 switch (conn->state) {
1719 if (conn->type == AMP_LINK) {
1720 struct hci_cp_disconn_phy_link cp;
1722 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1724 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1727 struct hci_cp_disconnect dc;
1729 dc.handle = cpu_to_le16(conn->handle);
1731 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1734 conn->state = BT_DISCONN;
1738 if (conn->type == LE_LINK) {
1739 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1741 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1743 } else if (conn->type == ACL_LINK) {
1744 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1746 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1751 if (conn->type == ACL_LINK) {
1752 struct hci_cp_reject_conn_req rej;
1754 bacpy(&rej.bdaddr, &conn->dst);
1755 rej.reason = reason;
1757 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1759 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1760 struct hci_cp_reject_sync_conn_req rej;
1762 bacpy(&rej.bdaddr, &conn->dst);
1764 /* SCO rejection has its own limited set of
1765 * allowed error values (0x0D-0x0F) which isn't
1766 * compatible with most values passed to this
1767 * function. To be safe hard-code one of the
1768 * values that's suitable for SCO.
1770 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1772 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1777 conn->state = BT_CLOSED;
1782 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1785 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1788 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1790 struct hci_request req;
1793 hci_req_init(&req, conn->hdev);
1795 __hci_abort_conn(&req, conn, reason);
1797 err = hci_req_run(&req, abort_conn_complete);
1798 if (err && err != -ENODATA) {
1799 BT_ERR("Failed to run HCI request: err %d", err);
1806 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1808 hci_dev_lock(req->hdev);
1809 __hci_update_background_scan(req);
1810 hci_dev_unlock(req->hdev);
1814 static void bg_scan_update(struct work_struct *work)
1816 struct hci_dev *hdev = container_of(work, struct hci_dev,
1818 struct hci_conn *conn;
1822 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1828 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1830 hci_le_conn_failed(conn, status);
1832 hci_dev_unlock(hdev);
/* hci_req_sync callback: queue the LE scan disable command. */
static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}
1841 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1844 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1845 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1846 struct hci_cp_inquiry cp;
1848 BT_DBG("%s", req->hdev->name);
1850 hci_dev_lock(req->hdev);
1851 hci_inquiry_cache_flush(req->hdev);
1852 hci_dev_unlock(req->hdev);
1854 memset(&cp, 0, sizeof(cp));
1856 if (req->hdev->discovery.limited)
1857 memcpy(&cp.lap, liac, sizeof(cp.lap));
1859 memcpy(&cp.lap, giac, sizeof(cp.lap));
1863 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1868 static void le_scan_disable_work(struct work_struct *work)
1870 struct hci_dev *hdev = container_of(work, struct hci_dev,
1871 le_scan_disable.work);
1874 BT_DBG("%s", hdev->name);
1876 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1879 cancel_delayed_work(&hdev->le_scan_restart);
1881 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1883 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1887 hdev->discovery.scan_start = 0;
1889 /* If we were running LE only scan, change discovery state. If
1890 * we were running both LE and BR/EDR inquiry simultaneously,
1891 * and BR/EDR inquiry is already finished, stop discovery,
1892 * otherwise BR/EDR inquiry will stop discovery when finished.
1893 * If we will resolve remote device name, do not change
1897 if (hdev->discovery.type == DISCOV_TYPE_LE)
1898 goto discov_stopped;
1900 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1903 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1904 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1905 hdev->discovery.state != DISCOVERY_RESOLVING)
1906 goto discov_stopped;
1911 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1912 HCI_CMD_TIMEOUT, &status);
1914 BT_ERR("Inquiry failed: status 0x%02x", status);
1915 goto discov_stopped;
1922 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1923 hci_dev_unlock(hdev);
1926 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1928 struct hci_dev *hdev = req->hdev;
1929 struct hci_cp_le_set_scan_enable cp;
1931 /* If controller is not scanning we are done. */
1932 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1935 hci_req_add_le_scan_disable(req);
1937 memset(&cp, 0, sizeof(cp));
1938 cp.enable = LE_SCAN_ENABLE;
1939 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1940 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1945 static void le_scan_restart_work(struct work_struct *work)
1947 struct hci_dev *hdev = container_of(work, struct hci_dev,
1948 le_scan_restart.work);
1949 unsigned long timeout, duration, scan_start, now;
1952 BT_DBG("%s", hdev->name);
1954 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1956 BT_ERR("Failed to restart LE scan: status %d", status);
1962 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1963 !hdev->discovery.scan_start)
1966 /* When the scan was started, hdev->le_scan_disable has been queued
1967 * after duration from scan_start. During scan restart this job
1968 * has been canceled, and we need to queue it again after proper
1969 * timeout, to make sure that scan does not run indefinitely.
1971 duration = hdev->discovery.scan_duration;
1972 scan_start = hdev->discovery.scan_start;
1974 if (now - scan_start <= duration) {
1977 if (now >= scan_start)
1978 elapsed = now - scan_start;
1980 elapsed = ULONG_MAX - scan_start + now;
1982 timeout = duration - elapsed;
1987 queue_delayed_work(hdev->req_workqueue,
1988 &hdev->le_scan_disable, timeout);
1991 hci_dev_unlock(hdev);
1994 static void disable_advertising(struct hci_request *req)
1998 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
2001 static int active_scan(struct hci_request *req, unsigned long opt)
2003 uint16_t interval = opt;
2004 struct hci_dev *hdev = req->hdev;
2005 struct hci_cp_le_set_scan_param param_cp;
2006 struct hci_cp_le_set_scan_enable enable_cp;
2010 BT_DBG("%s", hdev->name);
2012 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2015 /* Don't let discovery abort an outgoing connection attempt
2016 * that's using directed advertising.
2018 if (hci_lookup_le_connect(hdev)) {
2019 hci_dev_unlock(hdev);
2023 cancel_adv_timeout(hdev);
2024 hci_dev_unlock(hdev);
2026 disable_advertising(req);
2029 /* If controller is scanning, it means the background scanning is
2030 * running. Thus, we should temporarily stop it in order to set the
2031 * discovery scanning parameters.
2033 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2034 hci_req_add_le_scan_disable(req);
2036 /* All active scans will be done with either a resolvable private
2037 * address (when privacy feature has been enabled) or non-resolvable
2040 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2043 own_addr_type = ADDR_LE_DEV_PUBLIC;
2045 memset(¶m_cp, 0, sizeof(param_cp));
2046 param_cp.type = LE_SCAN_ACTIVE;
2047 param_cp.interval = cpu_to_le16(interval);
2048 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2049 param_cp.own_address_type = own_addr_type;
2051 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2054 memset(&enable_cp, 0, sizeof(enable_cp));
2055 enable_cp.enable = LE_SCAN_ENABLE;
2056 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2058 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2064 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2068 BT_DBG("%s", req->hdev->name);
2070 err = active_scan(req, opt);
2074 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2077 static void start_discovery(struct hci_dev *hdev, u8 *status)
2079 unsigned long timeout;
2081 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2083 switch (hdev->discovery.type) {
2084 case DISCOV_TYPE_BREDR:
2085 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2086 hci_req_sync(hdev, bredr_inquiry,
2087 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2090 case DISCOV_TYPE_INTERLEAVED:
2091 /* When running simultaneous discovery, the LE scanning time
2092 * should occupy the whole discovery time sine BR/EDR inquiry
2093 * and LE scanning are scheduled by the controller.
2095 * For interleaving discovery in comparison, BR/EDR inquiry
2096 * and LE scanning are done sequentially with separate
2099 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2101 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2102 /* During simultaneous discovery, we double LE scan
2103 * interval. We must leave some time for the controller
2104 * to do BR/EDR inquiry.
2106 hci_req_sync(hdev, interleaved_discov,
2107 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2112 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2113 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2114 HCI_CMD_TIMEOUT, status);
2116 case DISCOV_TYPE_LE:
2117 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2118 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2119 HCI_CMD_TIMEOUT, status);
2122 *status = HCI_ERROR_UNSPECIFIED;
2129 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2131 /* When service discovery is used and the controller has a
2132 * strict duplicate filter, it is important to remember the
2133 * start and duration of the scan. This is required for
2134 * restarting scanning during the discovery phase.
2136 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2137 hdev->discovery.result_filtering) {
2138 hdev->discovery.scan_start = jiffies;
2139 hdev->discovery.scan_duration = timeout;
2142 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2146 bool hci_req_stop_discovery(struct hci_request *req)
2148 struct hci_dev *hdev = req->hdev;
2149 struct discovery_state *d = &hdev->discovery;
2150 struct hci_cp_remote_name_req_cancel cp;
2151 struct inquiry_entry *e;
2154 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2156 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2157 if (test_bit(HCI_INQUIRY, &hdev->flags))
2158 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2160 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2161 cancel_delayed_work(&hdev->le_scan_disable);
2162 hci_req_add_le_scan_disable(req);
2167 /* Passive scanning */
2168 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2169 hci_req_add_le_scan_disable(req);
2174 /* No further actions needed for LE-only discovery */
2175 if (d->type == DISCOV_TYPE_LE)
2178 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2179 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2184 bacpy(&cp.bdaddr, &e->data.bdaddr);
2185 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2193 static int stop_discovery(struct hci_request *req, unsigned long opt)
2195 hci_dev_lock(req->hdev);
2196 hci_req_stop_discovery(req);
2197 hci_dev_unlock(req->hdev);
2202 static void discov_update(struct work_struct *work)
2204 struct hci_dev *hdev = container_of(work, struct hci_dev,
2208 switch (hdev->discovery.state) {
2209 case DISCOVERY_STARTING:
2210 start_discovery(hdev, &status);
2211 mgmt_start_discovery_complete(hdev, status);
2213 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2215 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2217 case DISCOVERY_STOPPING:
2218 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2219 mgmt_stop_discovery_complete(hdev, status);
2221 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2223 case DISCOVERY_STOPPED:
2229 static void discov_off(struct work_struct *work)
2231 struct hci_dev *hdev = container_of(work, struct hci_dev,
2234 BT_DBG("%s", hdev->name);
2238 /* When discoverable timeout triggers, then just make sure
2239 * the limited discoverable flag is cleared. Even in the case
2240 * of a timeout triggered from general discoverable, it is
2241 * safe to unconditionally clear the flag.
2243 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2244 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2245 hdev->discov_timeout = 0;
2247 hci_dev_unlock(hdev);
2249 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2250 mgmt_new_settings(hdev);
2253 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2255 struct hci_dev *hdev = req->hdev;
2260 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2261 !lmp_host_ssp_capable(hdev)) {
2264 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2266 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2269 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2270 sizeof(support), &support);
2274 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2275 lmp_bredr_capable(hdev)) {
2276 struct hci_cp_write_le_host_supported cp;
2281 /* Check first if we already have the right
2282 * host state (host features set)
2284 if (cp.le != lmp_host_le_capable(hdev) ||
2285 cp.simul != lmp_host_le_br_capable(hdev))
2286 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2290 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2291 /* Make sure the controller has a good default for
2292 * advertising data. This also applies to the case
2293 * where BR/EDR was toggled during the AUTO_OFF phase.
2295 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2296 list_empty(&hdev->adv_instances)) {
2297 __hci_req_update_adv_data(req, 0x00);
2298 __hci_req_update_scan_rsp_data(req, 0x00);
2300 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2301 __hci_req_enable_advertising(req);
2302 } else if (!list_empty(&hdev->adv_instances)) {
2303 struct adv_info *adv_instance;
2305 adv_instance = list_first_entry(&hdev->adv_instances,
2306 struct adv_info, list);
2307 __hci_req_schedule_adv_instance(req,
2308 adv_instance->instance,
2313 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2314 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2315 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2316 sizeof(link_sec), &link_sec);
2318 if (lmp_bredr_capable(hdev)) {
2319 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2320 __hci_req_write_fast_connectable(req, true);
2322 __hci_req_write_fast_connectable(req, false);
2323 __hci_req_update_scan(req);
2324 __hci_req_update_class(req);
2325 __hci_req_update_name(req);
2326 __hci_req_update_eir(req);
2329 hci_dev_unlock(hdev);
2333 int __hci_req_hci_power_on(struct hci_dev *hdev)
2335 /* Register the available SMP channels (BR/EDR and LE) only when
2336 * successfully powering on the controller. This late
2337 * registration is required so that LE SMP can clearly decide if
2338 * the public address or static address is used.
2342 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2346 void hci_request_setup(struct hci_dev *hdev)
2348 INIT_WORK(&hdev->discov_update, discov_update);
2349 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2350 INIT_WORK(&hdev->scan_update, scan_update_work);
2351 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2352 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2353 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2354 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2355 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2356 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2359 void hci_request_cancel_all(struct hci_dev *hdev)
2361 hci_req_sync_cancel(hdev, ENODEV);
2363 cancel_work_sync(&hdev->discov_update);
2364 cancel_work_sync(&hdev->bg_scan_update);
2365 cancel_work_sync(&hdev->scan_update);
2366 cancel_work_sync(&hdev->connectable_update);
2367 cancel_work_sync(&hdev->discoverable_update);
2368 cancel_delayed_work_sync(&hdev->discov_off);
2369 cancel_delayed_work_sync(&hdev->le_scan_disable);
2370 cancel_delayed_work_sync(&hdev->le_scan_restart);
2372 if (hdev->adv_instance_timeout) {
2373 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2374 hdev->adv_instance_timeout = 0;