/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 10
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
152 static const u16 mgmt_untrusted_events[] = {
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
165 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_FAILED, /* Reserved for future use */
216 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
217 MGMT_STATUS_REJECTED, /* QoS Rejected */
218 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
219 MGMT_STATUS_REJECTED, /* Insufficient Security */
220 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
221 MGMT_STATUS_FAILED, /* Reserved for future use */
222 MGMT_STATUS_BUSY, /* Role Switch Pending */
223 MGMT_STATUS_FAILED, /* Reserved for future use */
224 MGMT_STATUS_FAILED, /* Slot Violation */
225 MGMT_STATUS_FAILED, /* Role Switch Failed */
226 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
227 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
228 MGMT_STATUS_BUSY, /* Host Busy Pairing */
229 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
230 MGMT_STATUS_BUSY, /* Controller Busy */
231 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
232 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
233 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
234 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
235 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
238 static u8 mgmt_status(u8 hci_status)
240 if (hci_status < ARRAY_SIZE(mgmt_status_table))
241 return mgmt_status_table[hci_status];
243 return MGMT_STATUS_FAILED;
246 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
249 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
253 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
254 u16 len, int flag, struct sock *skip_sk)
256 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
260 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
261 u16 len, struct sock *skip_sk)
263 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
264 HCI_MGMT_GENERIC_EVENTS, skip_sk);
267 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
268 struct sock *skip_sk)
270 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
271 HCI_SOCK_TRUSTED, skip_sk);
274 static u8 le_addr_type(u8 mgmt_addr_type)
276 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
277 return ADDR_LE_DEV_PUBLIC;
279 return ADDR_LE_DEV_RANDOM;
282 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
285 struct mgmt_rp_read_version rp;
287 BT_DBG("sock %p", sk);
289 rp.version = MGMT_VERSION;
290 rp.revision = cpu_to_le16(MGMT_REVISION);
292 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
296 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
299 struct mgmt_rp_read_commands *rp;
300 u16 num_commands, num_events;
304 BT_DBG("sock %p", sk);
306 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
307 num_commands = ARRAY_SIZE(mgmt_commands);
308 num_events = ARRAY_SIZE(mgmt_events);
310 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
311 num_events = ARRAY_SIZE(mgmt_untrusted_events);
314 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
316 rp = kmalloc(rp_size, GFP_KERNEL);
320 rp->num_commands = cpu_to_le16(num_commands);
321 rp->num_events = cpu_to_le16(num_events);
323 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
324 __le16 *opcode = rp->opcodes;
326 for (i = 0; i < num_commands; i++, opcode++)
327 put_unaligned_le16(mgmt_commands[i], opcode);
329 for (i = 0; i < num_events; i++, opcode++)
330 put_unaligned_le16(mgmt_events[i], opcode);
332 __le16 *opcode = rp->opcodes;
334 for (i = 0; i < num_commands; i++, opcode++)
335 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
337 for (i = 0; i < num_events; i++, opcode++)
338 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
341 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
348 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
351 struct mgmt_rp_read_index_list *rp;
357 BT_DBG("sock %p", sk);
359 read_lock(&hci_dev_list_lock);
362 list_for_each_entry(d, &hci_dev_list, list) {
363 if (d->dev_type == HCI_BREDR &&
364 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
368 rp_len = sizeof(*rp) + (2 * count);
369 rp = kmalloc(rp_len, GFP_ATOMIC);
371 read_unlock(&hci_dev_list_lock);
376 list_for_each_entry(d, &hci_dev_list, list) {
377 if (hci_dev_test_flag(d, HCI_SETUP) ||
378 hci_dev_test_flag(d, HCI_CONFIG) ||
379 hci_dev_test_flag(d, HCI_USER_CHANNEL))
382 /* Devices marked as raw-only are neither configured
383 * nor unconfigured controllers.
385 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
388 if (d->dev_type == HCI_BREDR &&
389 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
390 rp->index[count++] = cpu_to_le16(d->id);
391 BT_DBG("Added hci%u", d->id);
395 rp->num_controllers = cpu_to_le16(count);
396 rp_len = sizeof(*rp) + (2 * count);
398 read_unlock(&hci_dev_list_lock);
400 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
408 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
409 void *data, u16 data_len)
411 struct mgmt_rp_read_unconf_index_list *rp;
417 BT_DBG("sock %p", sk);
419 read_lock(&hci_dev_list_lock);
422 list_for_each_entry(d, &hci_dev_list, list) {
423 if (d->dev_type == HCI_BREDR &&
424 hci_dev_test_flag(d, HCI_UNCONFIGURED))
428 rp_len = sizeof(*rp) + (2 * count);
429 rp = kmalloc(rp_len, GFP_ATOMIC);
431 read_unlock(&hci_dev_list_lock);
436 list_for_each_entry(d, &hci_dev_list, list) {
437 if (hci_dev_test_flag(d, HCI_SETUP) ||
438 hci_dev_test_flag(d, HCI_CONFIG) ||
439 hci_dev_test_flag(d, HCI_USER_CHANNEL))
442 /* Devices marked as raw-only are neither configured
443 * nor unconfigured controllers.
445 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
448 if (d->dev_type == HCI_BREDR &&
449 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
450 rp->index[count++] = cpu_to_le16(d->id);
451 BT_DBG("Added hci%u", d->id);
455 rp->num_controllers = cpu_to_le16(count);
456 rp_len = sizeof(*rp) + (2 * count);
458 read_unlock(&hci_dev_list_lock);
460 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
461 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
468 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
469 void *data, u16 data_len)
471 struct mgmt_rp_read_ext_index_list *rp;
477 BT_DBG("sock %p", sk);
479 read_lock(&hci_dev_list_lock);
482 list_for_each_entry(d, &hci_dev_list, list) {
483 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
487 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
488 rp = kmalloc(rp_len, GFP_ATOMIC);
490 read_unlock(&hci_dev_list_lock);
495 list_for_each_entry(d, &hci_dev_list, list) {
496 if (hci_dev_test_flag(d, HCI_SETUP) ||
497 hci_dev_test_flag(d, HCI_CONFIG) ||
498 hci_dev_test_flag(d, HCI_USER_CHANNEL))
501 /* Devices marked as raw-only are neither configured
502 * nor unconfigured controllers.
504 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
507 if (d->dev_type == HCI_BREDR) {
508 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
509 rp->entry[count].type = 0x01;
511 rp->entry[count].type = 0x00;
512 } else if (d->dev_type == HCI_AMP) {
513 rp->entry[count].type = 0x02;
518 rp->entry[count].bus = d->bus;
519 rp->entry[count++].index = cpu_to_le16(d->id);
520 BT_DBG("Added hci%u", d->id);
523 rp->num_controllers = cpu_to_le16(count);
524 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
526 read_unlock(&hci_dev_list_lock);
528 /* If this command is called at least once, then all the
529 * default index and unconfigured index events are disabled
530 * and from now on only extended index events are used.
532 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
533 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
534 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
536 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
537 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
544 static bool is_configured(struct hci_dev *hdev)
546 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
547 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
550 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
551 !bacmp(&hdev->public_addr, BDADDR_ANY))
557 static __le32 get_missing_options(struct hci_dev *hdev)
561 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
562 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
563 options |= MGMT_OPTION_EXTERNAL_CONFIG;
565 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
566 !bacmp(&hdev->public_addr, BDADDR_ANY))
567 options |= MGMT_OPTION_PUBLIC_ADDRESS;
569 return cpu_to_le32(options);
572 static int new_options(struct hci_dev *hdev, struct sock *skip)
574 __le32 options = get_missing_options(hdev);
576 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
577 sizeof(options), skip);
580 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
582 __le32 options = get_missing_options(hdev);
584 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
588 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
589 void *data, u16 data_len)
591 struct mgmt_rp_read_config_info rp;
594 BT_DBG("sock %p %s", sk, hdev->name);
598 memset(&rp, 0, sizeof(rp));
599 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
601 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
602 options |= MGMT_OPTION_EXTERNAL_CONFIG;
604 if (hdev->set_bdaddr)
605 options |= MGMT_OPTION_PUBLIC_ADDRESS;
607 rp.supported_options = cpu_to_le32(options);
608 rp.missing_options = get_missing_options(hdev);
610 hci_dev_unlock(hdev);
612 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
616 static u32 get_supported_settings(struct hci_dev *hdev)
620 settings |= MGMT_SETTING_POWERED;
621 settings |= MGMT_SETTING_BONDABLE;
622 settings |= MGMT_SETTING_DEBUG_KEYS;
623 settings |= MGMT_SETTING_CONNECTABLE;
624 settings |= MGMT_SETTING_DISCOVERABLE;
626 if (lmp_bredr_capable(hdev)) {
627 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
628 settings |= MGMT_SETTING_FAST_CONNECTABLE;
629 settings |= MGMT_SETTING_BREDR;
630 settings |= MGMT_SETTING_LINK_SECURITY;
632 if (lmp_ssp_capable(hdev)) {
633 settings |= MGMT_SETTING_SSP;
634 if (IS_ENABLED(CONFIG_BT_HS))
635 settings |= MGMT_SETTING_HS;
638 if (lmp_sc_capable(hdev))
639 settings |= MGMT_SETTING_SECURE_CONN;
642 if (lmp_le_capable(hdev)) {
643 settings |= MGMT_SETTING_LE;
644 settings |= MGMT_SETTING_ADVERTISING;
645 settings |= MGMT_SETTING_SECURE_CONN;
646 settings |= MGMT_SETTING_PRIVACY;
647 settings |= MGMT_SETTING_STATIC_ADDRESS;
650 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
652 settings |= MGMT_SETTING_CONFIGURATION;
657 static u32 get_current_settings(struct hci_dev *hdev)
661 if (hdev_is_powered(hdev))
662 settings |= MGMT_SETTING_POWERED;
664 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
665 settings |= MGMT_SETTING_CONNECTABLE;
667 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
668 settings |= MGMT_SETTING_FAST_CONNECTABLE;
670 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
671 settings |= MGMT_SETTING_DISCOVERABLE;
673 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
674 settings |= MGMT_SETTING_BONDABLE;
676 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
677 settings |= MGMT_SETTING_BREDR;
679 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
680 settings |= MGMT_SETTING_LE;
682 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
683 settings |= MGMT_SETTING_LINK_SECURITY;
685 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
686 settings |= MGMT_SETTING_SSP;
688 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
689 settings |= MGMT_SETTING_HS;
691 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
692 settings |= MGMT_SETTING_ADVERTISING;
694 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
695 settings |= MGMT_SETTING_SECURE_CONN;
697 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
698 settings |= MGMT_SETTING_DEBUG_KEYS;
700 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
701 settings |= MGMT_SETTING_PRIVACY;
703 /* The current setting for static address has two purposes. The
704 * first is to indicate if the static address will be used and
705 * the second is to indicate if it is actually set.
707 * This means if the static address is not configured, this flag
708 * will never be set. If the address is configured, then if the
709 * address is actually used decides if the flag is set or not.
711 * For single mode LE only controllers and dual-mode controllers
712 * with BR/EDR disabled, the existence of the static address will
715 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
716 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
717 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
718 if (bacmp(&hdev->static_addr, BDADDR_ANY))
719 settings |= MGMT_SETTING_STATIC_ADDRESS;
725 #define PNP_INFO_SVCLASS_ID 0x1200
727 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
729 u8 *ptr = data, *uuids_start = NULL;
730 struct bt_uuid *uuid;
735 list_for_each_entry(uuid, &hdev->uuids, list) {
738 if (uuid->size != 16)
741 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
745 if (uuid16 == PNP_INFO_SVCLASS_ID)
751 uuids_start[1] = EIR_UUID16_ALL;
755 /* Stop if not enough space to put next UUID */
756 if ((ptr - data) + sizeof(u16) > len) {
757 uuids_start[1] = EIR_UUID16_SOME;
761 *ptr++ = (uuid16 & 0x00ff);
762 *ptr++ = (uuid16 & 0xff00) >> 8;
763 uuids_start[0] += sizeof(uuid16);
769 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
771 u8 *ptr = data, *uuids_start = NULL;
772 struct bt_uuid *uuid;
777 list_for_each_entry(uuid, &hdev->uuids, list) {
778 if (uuid->size != 32)
784 uuids_start[1] = EIR_UUID32_ALL;
788 /* Stop if not enough space to put next UUID */
789 if ((ptr - data) + sizeof(u32) > len) {
790 uuids_start[1] = EIR_UUID32_SOME;
794 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
796 uuids_start[0] += sizeof(u32);
802 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
804 u8 *ptr = data, *uuids_start = NULL;
805 struct bt_uuid *uuid;
810 list_for_each_entry(uuid, &hdev->uuids, list) {
811 if (uuid->size != 128)
817 uuids_start[1] = EIR_UUID128_ALL;
821 /* Stop if not enough space to put next UUID */
822 if ((ptr - data) + 16 > len) {
823 uuids_start[1] = EIR_UUID128_SOME;
827 memcpy(ptr, uuid->uuid, 16);
829 uuids_start[0] += 16;
835 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
837 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
840 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
841 struct hci_dev *hdev,
844 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
847 static u8 get_current_adv_instance(struct hci_dev *hdev)
849 /* The "Set Advertising" setting supersedes the "Add Advertising"
850 * setting. Here we set the advertising data based on which
851 * setting was set. When neither apply, default to the global settings,
852 * represented by instance "0".
854 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
855 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
856 return hdev->cur_adv_instance;
861 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
866 name_len = strlen(hdev->dev_name);
868 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
870 if (name_len > max_len) {
872 ptr[1] = EIR_NAME_SHORT;
874 ptr[1] = EIR_NAME_COMPLETE;
876 ptr[0] = name_len + 1;
878 memcpy(ptr + 2, hdev->dev_name, name_len);
880 ad_len += (name_len + 2);
881 ptr += (name_len + 2);
887 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
890 struct adv_info *adv_instance;
892 adv_instance = hci_find_adv_instance(hdev, instance);
896 /* TODO: Set the appropriate entries based on advertising instance flags
897 * here once flags other than 0 are supported.
899 memcpy(ptr, adv_instance->scan_rsp_data,
900 adv_instance->scan_rsp_len);
902 return adv_instance->scan_rsp_len;
905 static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
907 struct hci_dev *hdev = req->hdev;
908 struct hci_cp_le_set_scan_rsp_data cp;
911 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
914 memset(&cp, 0, sizeof(cp));
917 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
919 len = create_default_scan_rsp_data(hdev, cp.data);
921 if (hdev->scan_rsp_data_len == len &&
922 !memcmp(cp.data, hdev->scan_rsp_data, len))
925 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
926 hdev->scan_rsp_data_len = len;
930 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
933 static void update_scan_rsp_data(struct hci_request *req)
935 update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
938 static u8 get_adv_discov_flags(struct hci_dev *hdev)
940 struct mgmt_pending_cmd *cmd;
942 /* If there's a pending mgmt command the flags will not yet have
943 * their final values, so check for this first.
945 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
947 struct mgmt_mode *cp = cmd->param;
949 return LE_AD_GENERAL;
950 else if (cp->val == 0x02)
951 return LE_AD_LIMITED;
953 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
954 return LE_AD_LIMITED;
955 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
956 return LE_AD_GENERAL;
962 static bool get_connectable(struct hci_dev *hdev)
964 struct mgmt_pending_cmd *cmd;
966 /* If there's a pending mgmt command the flag will not yet have
967 * it's final value, so check for this first.
969 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
971 struct mgmt_mode *cp = cmd->param;
976 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
979 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
982 struct adv_info *adv_instance;
984 if (instance == 0x00) {
985 /* Instance 0 always manages the "Tx Power" and "Flags"
988 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
990 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
991 * corresponds to the "connectable" instance flag.
993 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
994 flags |= MGMT_ADV_FLAG_CONNECTABLE;
999 adv_instance = hci_find_adv_instance(hdev, instance);
1001 /* Return 0 when we got an invalid instance identifier. */
1005 return adv_instance->flags;
1008 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1010 u8 instance = get_current_adv_instance(hdev);
1011 struct adv_info *adv_instance;
1013 /* Ignore instance 0 */
1014 if (instance == 0x00)
1017 adv_instance = hci_find_adv_instance(hdev, instance);
1021 /* TODO: Take into account the "appearance" and "local-name" flags here.
1022 * These are currently being ignored as they are not supported.
1024 return adv_instance->scan_rsp_len;
1027 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1029 struct adv_info *adv_instance = NULL;
1030 u8 ad_len = 0, flags = 0;
1033 /* Return 0 when the current instance identifier is invalid. */
1035 adv_instance = hci_find_adv_instance(hdev, instance);
1040 instance_flags = get_adv_instance_flags(hdev, instance);
1042 /* The Add Advertising command allows userspace to set both the general
1043 * and limited discoverable flags.
1045 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1046 flags |= LE_AD_GENERAL;
1048 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1049 flags |= LE_AD_LIMITED;
1051 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1052 /* If a discovery flag wasn't provided, simply use the global
1056 flags |= get_adv_discov_flags(hdev);
1058 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1059 flags |= LE_AD_NO_BREDR;
1061 /* If flags would still be empty, then there is no need to
1062 * include the "Flags" AD field".
1075 memcpy(ptr, adv_instance->adv_data,
1076 adv_instance->adv_data_len);
1077 ad_len += adv_instance->adv_data_len;
1078 ptr += adv_instance->adv_data_len;
1081 /* Provide Tx Power only if we can provide a valid value for it */
1082 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1083 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1085 ptr[1] = EIR_TX_POWER;
1086 ptr[2] = (u8)hdev->adv_tx_power;
1095 static void update_inst_adv_data(struct hci_request *req, u8 instance)
1097 struct hci_dev *hdev = req->hdev;
1098 struct hci_cp_le_set_adv_data cp;
1101 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1104 memset(&cp, 0, sizeof(cp));
1106 len = create_instance_adv_data(hdev, instance, cp.data);
1108 /* There's nothing to do if the data hasn't changed */
1109 if (hdev->adv_data_len == len &&
1110 memcmp(cp.data, hdev->adv_data, len) == 0)
1113 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1114 hdev->adv_data_len = len;
1118 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1121 static void update_adv_data(struct hci_request *req)
1123 update_inst_adv_data(req, get_current_adv_instance(req->hdev));
1126 int mgmt_update_adv_data(struct hci_dev *hdev)
1128 struct hci_request req;
1130 hci_req_init(&req, hdev);
1131 update_adv_data(&req);
1133 return hci_req_run(&req, NULL);
1136 static void create_eir(struct hci_dev *hdev, u8 *data)
1141 name_len = strlen(hdev->dev_name);
1145 if (name_len > 48) {
1147 ptr[1] = EIR_NAME_SHORT;
1149 ptr[1] = EIR_NAME_COMPLETE;
1151 /* EIR Data length */
1152 ptr[0] = name_len + 1;
1154 memcpy(ptr + 2, hdev->dev_name, name_len);
1156 ptr += (name_len + 2);
1159 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1161 ptr[1] = EIR_TX_POWER;
1162 ptr[2] = (u8) hdev->inq_tx_power;
1167 if (hdev->devid_source > 0) {
1169 ptr[1] = EIR_DEVICE_ID;
1171 put_unaligned_le16(hdev->devid_source, ptr + 2);
1172 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1173 put_unaligned_le16(hdev->devid_product, ptr + 6);
1174 put_unaligned_le16(hdev->devid_version, ptr + 8);
1179 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1180 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1181 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1184 static void update_eir(struct hci_request *req)
1186 struct hci_dev *hdev = req->hdev;
1187 struct hci_cp_write_eir cp;
1189 if (!hdev_is_powered(hdev))
1192 if (!lmp_ext_inq_capable(hdev))
1195 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1198 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1201 memset(&cp, 0, sizeof(cp));
1203 create_eir(hdev, cp.data);
1205 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1208 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1210 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1213 static u8 get_service_classes(struct hci_dev *hdev)
1215 struct bt_uuid *uuid;
1218 list_for_each_entry(uuid, &hdev->uuids, list)
1219 val |= uuid->svc_hint;
1224 static void update_class(struct hci_request *req)
1226 struct hci_dev *hdev = req->hdev;
1229 BT_DBG("%s", hdev->name);
1231 if (!hdev_is_powered(hdev))
1234 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1237 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1240 cod[0] = hdev->minor_class;
1241 cod[1] = hdev->major_class;
1242 cod[2] = get_service_classes(hdev);
1244 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1247 if (memcmp(cod, hdev->dev_class, 3) == 0)
1250 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1253 static void disable_advertising(struct hci_request *req)
1257 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1260 static void enable_advertising(struct hci_request *req)
1262 struct hci_dev *hdev = req->hdev;
1263 struct hci_cp_le_set_adv_param cp;
1264 u8 own_addr_type, enable = 0x01;
1269 if (hci_conn_num(hdev, LE_LINK) > 0)
1272 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1273 disable_advertising(req);
1275 /* Clear the HCI_LE_ADV bit temporarily so that the
1276 * hci_update_random_address knows that it's safe to go ahead
1277 * and write a new random address. The flag will be set back on
1278 * as soon as the SET_ADV_ENABLE HCI command completes.
1280 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1282 instance = get_current_adv_instance(hdev);
1283 flags = get_adv_instance_flags(hdev, instance);
1285 /* If the "connectable" instance flag was not set, then choose between
1286 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1288 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1289 get_connectable(hdev);
1291 /* Set require_privacy to true only when non-connectable
1292 * advertising is used. In that case it is fine to use a
1293 * non-resolvable private address.
1295 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1298 memset(&cp, 0, sizeof(cp));
1299 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1300 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1303 cp.type = LE_ADV_IND;
1304 else if (get_cur_adv_instance_scan_rsp_len(hdev))
1305 cp.type = LE_ADV_SCAN_IND;
1307 cp.type = LE_ADV_NONCONN_IND;
1309 cp.own_address_type = own_addr_type;
1310 cp.channel_map = hdev->le_adv_channel_map;
1312 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1314 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1317 static void service_cache_off(struct work_struct *work)
1319 struct hci_dev *hdev = container_of(work, struct hci_dev,
1320 service_cache.work);
1321 struct hci_request req;
1323 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1326 hci_req_init(&req, hdev);
1333 hci_dev_unlock(hdev);
1335 hci_req_run(&req, NULL);
/* Delayed-work handler run when the Resolvable Private Address lifetime
 * elapses: mark the RPA as expired and, if advertising is enabled,
 * re-enable advertising so a fresh RPA gets generated and programmed.
 * NOTE(review): some lines are missing from this extraction.
 */
1338 static void rpa_expired(struct work_struct *work)
1340 struct hci_dev *hdev = container_of(work, struct hci_dev,
1342 struct hci_request req;
1346 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1348 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1351 /* The generation of a new RPA and programming it into the
1352 * controller happens in the enable_advertising() function.
1354 hci_req_init(&req, hdev);
1355 enable_advertising(&req);
1356 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialization: sets HCI_MGMT (and bails if it
 * was already set), wires up the service-cache and RPA-expiry delayed work,
 * and clears HCI_BONDABLE so user space must opt in to bonding explicitly.
 */
1359 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1361 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1364 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1365 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1367 /* Non-mgmt controlled devices get this bit set
1368 * implicitly so that pairing works for them, however
1369 * for mgmt we require user-space to explicitly enable
1372 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: fill a mgmt_rp_read_info reply with the
 * controller's address, HCI version, manufacturer, supported/current
 * settings, class of device and names, then complete the command.
 * NOTE(review): the matching hci_dev_lock() and the reply-length argument
 * are missing from this extraction.
 */
1375 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1376 void *data, u16 data_len)
1378 struct mgmt_rp_read_info rp;
1380 BT_DBG("sock %p %s", sk, hdev->name);
1384 memset(&rp, 0, sizeof(rp));
1386 bacpy(&rp.bdaddr, &hdev->bdaddr);
1388 rp.version = hdev->hci_ver;
1389 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1391 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1392 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1394 memcpy(rp.dev_class, hdev->dev_class, 3);
1396 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1397 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1399 hci_dev_unlock(hdev);
1401 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Reply to a settings-changing command with the current settings bitmask
 * (little-endian u32) as the command-complete parameter.
 */
1405 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1407 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1409 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once no connections remain,
 * cancel the delayed power-off and run it immediately on the req workqueue.
 */
1413 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1415 BT_DBG("%s status 0x%02x", hdev->name, status);
1417 if (hci_conn_count(hdev) == 0) {
1418 cancel_delayed_work(&hdev->power_off);
1419 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to stop whatever discovery is in progress
 * (inquiry cancel, LE scan disable, or remote-name-request cancel depending
 * on discovery state), plus stopping passive LE scanning.
 * Returns whether any stop command was queued (per callers' usage; the
 * return statements are missing from this extraction).
 */
1423 static bool hci_stop_discovery(struct hci_request *req)
1425 struct hci_dev *hdev = req->hdev;
1426 struct hci_cp_remote_name_req_cancel cp;
1427 struct inquiry_entry *e;
1429 switch (hdev->discovery.state) {
1430 case DISCOVERY_FINDING:
1431 if (test_bit(HCI_INQUIRY, &hdev->flags))
1432 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1434 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1435 cancel_delayed_work(&hdev->le_scan_disable);
1436 hci_req_add_le_scan_disable(req);
1441 case DISCOVERY_RESOLVING:
1442 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1447 bacpy(&cp.bdaddr, &e->data.bdaddr);
1448 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1454 /* Passive scanning */
1455 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1456 hci_req_add_le_scan_disable(req);
/* Emit the Advertising Added mgmt event for the given instance, skipping
 * the socket that triggered the change (sk).
 */
1466 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1469 struct mgmt_ev_advertising_added ev;
1471 ev.instance = instance;
1473 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit the Advertising Removed mgmt event for the given instance, skipping
 * the socket that triggered the change (sk).
 */
1476 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1479 struct mgmt_ev_advertising_removed ev;
1481 ev.instance = instance;
1483 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Make the given advertising instance current: compute its timeout from
 * duration vs. remaining lifetime, arm the expiry delayed work, and (unless
 * the same instance is simply being rescheduled) queue the HCI commands to
 * update adv/scan-rsp data and enable advertising.
 * Bails out when legacy HCI_ADVERTISING is active, when the instance flag
 * is not set, or when an instance timeout is already armed.
 * NOTE(review): early returns and the final return value are missing from
 * this extraction - confirm error paths against the full file.
 */
1486 static int schedule_adv_instance(struct hci_request *req, u8 instance,
1488 struct hci_dev *hdev = req->hdev;
1489 struct adv_info *adv_instance = NULL;
1492 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1493 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1496 if (hdev->adv_instance_timeout)
1499 adv_instance = hci_find_adv_instance(hdev, instance);
1503 /* A zero timeout means unlimited advertising. As long as there is
1504 * only one instance, duration should be ignored. We still set a timeout
1505 * in case further instances are being added later on.
1507 * If the remaining lifetime of the instance is more than the duration
1508 * then the timeout corresponds to the duration, otherwise it will be
1509 * reduced to the remaining instance lifetime.
1511 if (adv_instance->timeout == 0 ||
1512 adv_instance->duration <= adv_instance->remaining_time)
1513 timeout = adv_instance->duration;
1515 timeout = adv_instance->remaining_time;
1517 /* The remaining time is being reduced unless the instance is being
1518 * advertised without time limit.
1520 if (adv_instance->timeout)
1521 adv_instance->remaining_time =
1522 adv_instance->remaining_time - timeout;
1524 hdev->adv_instance_timeout = timeout;
1525 queue_delayed_work(hdev->workqueue,
1526 &hdev->adv_instance_expire,
1527 msecs_to_jiffies(timeout * 1000));
1529 /* If we're just re-scheduling the same instance again then do not
1530 * execute any HCI commands. This happens when a single instance is
1533 if (!force && hdev->cur_adv_instance == instance &&
1534 hci_dev_test_flag(hdev, HCI_LE_ADV))
1537 hdev->cur_adv_instance = instance;
1538 update_adv_data(req);
1539 update_scan_rsp_data(req);
1540 enable_advertising(req);
/* Disarm the advertising-instance expiry: zero the recorded timeout and
 * cancel the pending delayed work (no-op if none was armed).
 */
1545 static void cancel_adv_timeout(struct hci_dev *hdev)
1547 if (hdev->adv_instance_timeout) {
1548 hdev->adv_instance_timeout = 0;
1549 cancel_delayed_work(&hdev->adv_instance_expire);
1553 /* For a single instance:
1554 * - force == true: The instance will be removed even when its remaining
1555 * lifetime is not zero.
1556 * - force == false: the instance will be deactivated but kept stored unless
1557 * the remaining lifetime is zero.
1559 * For instance == 0x00:
1560 * - force == true: All instances will be removed regardless of their timeout
1562 * - force == false: Only instances that have a timeout will be removed.
/* Remove one advertising instance (or all, when instance == 0x00) per the
 * force/timeout semantics documented in the comment block above this
 * function; emits Advertising Removed events, clears the instance flag when
 * the list becomes empty, and (given a request and a powered, non-legacy-
 * advertising device) schedules the next instance.
 * NOTE(review): braces, error checks and early returns are missing from
 * this extraction - confirm control flow against the full file.
 */
1564 static void clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
1565 u8 instance, bool force)
1567 struct adv_info *adv_instance, *n, *next_instance = NULL;
1571 /* Cancel any timeout concerning the removed instance(s). */
1572 if (!instance || hdev->cur_adv_instance == instance)
1573 cancel_adv_timeout(hdev);
1575 /* Get the next instance to advertise BEFORE we remove
1576 * the current one. This can be the same instance again
1577 * if there is only one instance.
1579 if (instance && hdev->cur_adv_instance == instance)
1580 next_instance = hci_get_next_instance(hdev, instance);
1582 if (instance == 0x00) {
1583 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1585 if (!(force || adv_instance->timeout))
1588 rem_inst = adv_instance->instance;
1589 err = hci_remove_adv_instance(hdev, rem_inst);
1591 advertising_removed(NULL, hdev, rem_inst);
1593 hdev->cur_adv_instance = 0x00;
1595 adv_instance = hci_find_adv_instance(hdev, instance);
1597 if (force || (adv_instance && adv_instance->timeout &&
1598 !adv_instance->remaining_time)) {
1599 /* Don't advertise a removed instance. */
1600 if (next_instance &&
1601 next_instance->instance == instance)
1602 next_instance = NULL;
1604 err = hci_remove_adv_instance(hdev, instance);
1606 advertising_removed(NULL, hdev, instance);
1610 if (list_empty(&hdev->adv_instances)) {
1611 hdev->cur_adv_instance = 0x00;
1612 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
1615 if (!req || !hdev_is_powered(hdev) ||
1616 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1620 schedule_adv_instance(req, next_instance->instance, false);
/* Prepare the controller for power-off in a single HCI request: disable
 * page/inquiry scan, drop advertising instances, disable advertising, stop
 * discovery, and abort every connection with reason 0x15 (power off).
 * Runs the request with clean_up_hci_complete as callback; on success with
 * discovery stopped, moves discovery state to STOPPING.
 * NOTE(review): some lines (scan value setup, return) are missing from
 * this extraction.
 */
1623 static int clean_up_hci_state(struct hci_dev *hdev)
1625 struct hci_request req;
1626 struct hci_conn *conn;
1627 bool discov_stopped;
1630 hci_req_init(&req, hdev);
1632 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1633 test_bit(HCI_PSCAN, &hdev->flags)) {
1635 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1638 clear_adv_instance(hdev, NULL, 0x00, false);
1640 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1641 disable_advertising(&req);
1643 discov_stopped = hci_stop_discovery(&req);
1645 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1646 /* 0x15 == Terminated due to Power Off */
1647 __hci_abort_conn(&req, conn, 0x15);
1650 err = hci_req_run(&req, clean_up_hci_complete);
1651 if (!err && discov_stopped)
1652 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: validate the 0/1 parameter, reject if a
 * SET_POWERED is already pending, handle the HCI_AUTO_OFF transition
 * directly via mgmt_powered(), short-circuit when already in the requested
 * state, and otherwise queue power_on work or (for power-off) run
 * clean_up_hci_state() with a delayed forced power-off as backstop;
 * -ENODATA from the cleanup means nothing was queued, so power off now.
 * NOTE(review): braces, gotos and the lock call are missing from this
 * extraction.
 */
1657 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1660 struct mgmt_mode *cp = data;
1661 struct mgmt_pending_cmd *cmd;
1664 BT_DBG("request for %s", hdev->name);
1666 if (cp->val != 0x00 && cp->val != 0x01)
1667 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1668 MGMT_STATUS_INVALID_PARAMS);
1672 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1673 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1678 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1679 cancel_delayed_work(&hdev->power_off);
1682 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1684 err = mgmt_powered(hdev, 1);
1689 if (!!cp->val == hdev_is_powered(hdev)) {
1690 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1694 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1701 queue_work(hdev->req_workqueue, &hdev->power_on);
1704 /* Disconnect connections, stop scans, etc */
1705 err = clean_up_hci_state(hdev);
1707 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1708 HCI_POWER_OFF_TIMEOUT);
1710 /* ENODATA means there were no HCI commands queued */
1711 if (err == -ENODATA) {
1712 cancel_delayed_work(&hdev->power_off);
1713 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1719 hci_dev_unlock(hdev);
/* Broadcast the New Settings mgmt event (current settings as LE u32) to all
 * mgmt sockets except 'skip'.
 */
1723 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1725 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1727 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
/* Public wrapper: broadcast New Settings to every mgmt socket. */
1731 int mgmt_new_settings(struct hci_dev *hdev)
1733 return new_settings(hdev, NULL);
1738 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer the pending command with the
 * current settings, detach and free it, and remember (with a socket
 * reference) the first socket seen in the cmd_lookup match for a later
 * skip-self New Settings broadcast.
 */
1742 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1744 struct cmd_lookup *match = data;
1746 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1748 list_del(&cmd->list);
1750 if (match->sk == NULL) {
1751 match->sk = cmd->sk;
1752 sock_hold(match->sk);
1755 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail the pending command with the status
 * pointed to by 'data' and remove it. (The u8 *status local declaration is
 * missing from this extraction.)
 */
1758 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1762 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1763 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own cmd_complete
 * handler when set, otherwise fall back to a plain status response.
 */
1766 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1768 if (cmd->cmd_complete) {
1771 cmd->cmd_complete(cmd, *status);
1772 mgmt_pending_remove(cmd);
1777 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's stored parameters back
 * as the reply payload.
 */
1780 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1782 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1783 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1786 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1788 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1789 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, else SUCCESS.
 */
1792 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1794 if (!lmp_bredr_capable(hdev))
1795 return MGMT_STATUS_NOT_SUPPORTED;
1796 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1797 return MGMT_STATUS_REJECTED;
1799 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED when the controller
 * lacks LE, REJECTED when LE is disabled, else SUCCESS.
 */
1802 static u8 mgmt_le_support(struct hci_dev *hdev)
1804 if (!lmp_le_capable(hdev))
1805 return MGMT_STATUS_NOT_SUPPORTED;
1806 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1807 return MGMT_STATUS_REJECTED;
1809 return MGMT_STATUS_SUCCESS;
/* HCI-request completion for SET_DISCOVERABLE: on error, report status and
 * clear limited-discoverable; on success, set/clear HCI_DISCOVERABLE, arm
 * the discoverable timeout if one was requested, reply with settings,
 * broadcast New Settings on change, and refresh class/page-scan state.
 * NOTE(review): braces, the success/failure branch structure and the lock
 * call are missing from this extraction.
 */
1812 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1815 struct mgmt_pending_cmd *cmd;
1816 struct mgmt_mode *cp;
1817 struct hci_request req;
1820 BT_DBG("status 0x%02x", status);
1824 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1829 u8 mgmt_err = mgmt_status(status);
1830 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1831 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1837 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1839 if (hdev->discov_timeout > 0) {
1840 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1841 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1845 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1848 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1851 new_settings(hdev, cmd->sk);
1853 /* When the discoverable mode gets changed, make sure
1854 * that class of device has the limited discoverable
1855 * bit correctly set. Also update page scan based on whitelist
1858 hci_req_init(&req, hdev);
1859 __hci_update_page_scan(&req);
1861 hci_req_run(&req, NULL);
1864 mgmt_pending_remove(cmd);
1867 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates val (0=off, 1=general,
 * 2=limited) and the timeout pairing rules, handles the powered-off case by
 * flipping the stored flag only, short-circuits when only the timeout
 * changes, and otherwise builds an HCI request: for BR/EDR, program the
 * IAC list (LIAC 0x9e8b00 + GIAC 0x9e8b33 for limited, GIAC alone for
 * general) and write scan enable; for LE-only, just refresh advertising
 * data. Completion is handled by set_discoverable_complete().
 * NOTE(review): numerous structural lines (locks, gotos, scan variable
 * setup, branch braces) are missing from this extraction.
 */
1870 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1873 struct mgmt_cp_set_discoverable *cp = data;
1874 struct mgmt_pending_cmd *cmd;
1875 struct hci_request req;
1880 BT_DBG("request for %s", hdev->name);
1882 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1883 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1884 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1885 MGMT_STATUS_REJECTED);
1887 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1888 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1889 MGMT_STATUS_INVALID_PARAMS);
1891 timeout = __le16_to_cpu(cp->timeout);
1893 /* Disabling discoverable requires that no timeout is set,
1894 * and enabling limited discoverable requires a timeout.
1896 if ((cp->val == 0x00 && timeout > 0) ||
1897 (cp->val == 0x02 && timeout == 0))
1898 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1899 MGMT_STATUS_INVALID_PARAMS);
1903 if (!hdev_is_powered(hdev) && timeout > 0) {
1904 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1905 MGMT_STATUS_NOT_POWERED);
1909 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1910 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1911 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1916 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1917 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1918 MGMT_STATUS_REJECTED);
1922 if (!hdev_is_powered(hdev)) {
1923 bool changed = false;
1925 /* Setting limited discoverable when powered off is
1926 * not a valid operation since it requires a timeout
1927 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1929 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1930 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1934 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1939 err = new_settings(hdev, sk);
1944 /* If the current mode is the same, then just update the timeout
1945 * value with the new value. And if only the timeout gets updated,
1946 * then no need for any HCI transactions.
1948 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1949 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1950 HCI_LIMITED_DISCOVERABLE)) {
1951 cancel_delayed_work(&hdev->discov_off);
1952 hdev->discov_timeout = timeout;
1954 if (cp->val && hdev->discov_timeout > 0) {
1955 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1956 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1960 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1964 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1970 /* Cancel any potential discoverable timeout that might be
1971 * still active and store new timeout value. The arming of
1972 * the timeout happens in the complete handler.
1974 cancel_delayed_work(&hdev->discov_off);
1975 hdev->discov_timeout = timeout;
1977 /* Limited discoverable mode */
1978 if (cp->val == 0x02)
1979 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1981 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1983 hci_req_init(&req, hdev);
1985 /* The procedure for LE-only controllers is much simpler - just
1986 * update the advertising data.
1988 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1994 struct hci_cp_write_current_iac_lap hci_cp;
1996 if (cp->val == 0x02) {
1997 /* Limited discoverable mode */
1998 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1999 hci_cp.iac_lap[0] = 0x00; /* LIAC */
2000 hci_cp.iac_lap[1] = 0x8b;
2001 hci_cp.iac_lap[2] = 0x9e;
2002 hci_cp.iac_lap[3] = 0x33; /* GIAC */
2003 hci_cp.iac_lap[4] = 0x8b;
2004 hci_cp.iac_lap[5] = 0x9e;
2006 /* General discoverable mode */
2008 hci_cp.iac_lap[0] = 0x33; /* GIAC */
2009 hci_cp.iac_lap[1] = 0x8b;
2010 hci_cp.iac_lap[2] = 0x9e;
2013 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2014 (hci_cp.num_iac * 3) + 1, &hci_cp);
2016 scan |= SCAN_INQUIRY;
2018 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2021 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
2024 update_adv_data(&req);
2026 err = hci_req_run(&req, set_discoverable_complete);
2028 mgmt_pending_remove(cmd);
2031 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands implementing "fast connectable":
 * interlaced scan with a 160 ms interval when enabled, standard scan with
 * the default 1.28 s interval otherwise; window is fixed at 0x0012.
 * Commands are queued only when the value actually changes. Skipped for
 * non-BR/EDR or pre-1.2 controllers.
 */
2035 static void write_fast_connectable(struct hci_request *req, bool enable)
2037 struct hci_dev *hdev = req->hdev;
2038 struct hci_cp_write_page_scan_activity acp;
2041 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2044 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
2048 type = PAGE_SCAN_TYPE_INTERLACED;
2050 /* 160 msec page scan interval */
2051 acp.interval = cpu_to_le16(0x0100);
2053 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2055 /* default 1.28 sec page scan */
2056 acp.interval = cpu_to_le16(0x0800);
2059 acp.window = cpu_to_le16(0x0012);
2061 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
2062 __cpu_to_le16(hdev->page_scan_window) != acp.window)
2063 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
2066 if (hdev->page_scan_type != type)
2067 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI-request completion for SET_CONNECTABLE: on error report the status;
 * otherwise set or clear HCI_CONNECTABLE (clearing discoverable flags when
 * turning connectable off), reply with the settings, and on any change
 * broadcast New Settings and refresh page scan, advertising data and
 * background scanning.
 * NOTE(review): branch braces and the lock call are missing from this
 * extraction.
 */
2070 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
2073 struct mgmt_pending_cmd *cmd;
2074 struct mgmt_mode *cp;
2075 bool conn_changed, discov_changed;
2077 BT_DBG("status 0x%02x", status);
2081 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
2086 u8 mgmt_err = mgmt_status(status);
2087 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
2093 conn_changed = !hci_dev_test_and_set_flag(hdev,
2095 discov_changed = false;
2097 conn_changed = hci_dev_test_and_clear_flag(hdev,
2099 discov_changed = hci_dev_test_and_clear_flag(hdev,
2103 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
2105 if (conn_changed || discov_changed) {
2106 new_settings(hdev, cmd->sk);
2107 hci_update_page_scan(hdev);
2109 mgmt_update_adv_data(hdev);
2110 hci_update_background_scan(hdev);
2114 mgmt_pending_remove(cmd);
2117 hci_dev_unlock(hdev);
/* Software-only SET_CONNECTABLE path (used when powered off or when no HCI
 * commands were needed): flip HCI_CONNECTABLE, clear HCI_DISCOVERABLE when
 * disabling, reply with settings and, on change, refresh page/background
 * scan and broadcast New Settings.
 */
2120 static int set_connectable_update_settings(struct hci_dev *hdev,
2121 struct sock *sk, u8 val)
2123 bool changed = false;
2126 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2130 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
2132 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
2133 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2136 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2141 hci_update_page_scan(hdev);
2142 hci_update_background_scan(hdev);
2143 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validate, take the settings-only path
 * when powered off, reject if a SET_DISCOVERABLE/SET_CONNECTABLE is already
 * pending, then build an HCI request - for LE-only, clear discoverable
 * flags and refresh adv data; for BR/EDR, compute the new scan enable value
 * (respecting the whitelist) and cancel the discoverable timeout when
 * inquiry scan goes away. Re-enables advertising if needed; -ENODATA from
 * hci_req_run falls back to the settings-only update.
 * NOTE(review): scan variable setup, gotos and braces are missing from this
 * extraction.
 */
2149 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2152 struct mgmt_mode *cp = data;
2153 struct mgmt_pending_cmd *cmd;
2154 struct hci_request req;
2158 BT_DBG("request for %s", hdev->name);
2160 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2161 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2162 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2163 MGMT_STATUS_REJECTED);
2165 if (cp->val != 0x00 && cp->val != 0x01)
2166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2167 MGMT_STATUS_INVALID_PARAMS);
2171 if (!hdev_is_powered(hdev)) {
2172 err = set_connectable_update_settings(hdev, sk, cp->val);
2176 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2177 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2178 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2183 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2189 hci_req_init(&req, hdev);
2191 /* If BR/EDR is not enabled and we disable advertising as a
2192 * by-product of disabling connectable, we need to update the
2193 * advertising flags.
2195 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2197 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2198 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2200 update_adv_data(&req);
2201 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2205 /* If we don't have any whitelist entries just
2206 * disable all scanning. If there are entries
2207 * and we had both page and inquiry scanning
2208 * enabled then fall back to only page scanning.
2209 * Otherwise no changes are needed.
2211 if (list_empty(&hdev->whitelist))
2212 scan = SCAN_DISABLED;
2213 else if (test_bit(HCI_ISCAN, &hdev->flags))
2216 goto no_scan_update;
2218 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2219 hdev->discov_timeout > 0)
2220 cancel_delayed_work(&hdev->discov_off);
2223 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2227 /* Update the advertising parameters if necessary */
2228 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2229 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
2230 enable_advertising(&req);
2232 err = hci_req_run(&req, set_connectable_complete);
2234 mgmt_pending_remove(cmd);
2235 if (err == -ENODATA)
2236 err = set_connectable_update_settings(hdev, sk,
2242 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag - set or clear
 * HCI_BONDABLE, reply with settings, and broadcast New Settings on change.
 * No HCI commands are required.
 */
2246 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2249 struct mgmt_mode *cp = data;
2253 BT_DBG("request for %s", hdev->name);
2255 if (cp->val != 0x00 && cp->val != 0x01)
2256 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2257 MGMT_STATUS_INVALID_PARAMS);
2262 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2264 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2266 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2271 err = new_settings(hdev, sk);
2274 hci_dev_unlock(hdev);
2278 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2281 struct mgmt_mode *cp = data;
2282 struct mgmt_pending_cmd *cmd;
2286 BT_DBG("request for %s", hdev->name);
2288 if (!IS_ENABLED(CONFIG_BT_HS))
2289 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2290 MGMT_STATUS_NOT_SUPPORTED);
2292 status = mgmt_bredr_support(hdev);
2294 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2297 if (cp->val != 0x00 && cp->val != 0x01)
2298 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2299 MGMT_STATUS_INVALID_PARAMS);
2303 if (!hdev_is_powered(hdev)) {
2304 bool changed = false;
2306 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2307 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2311 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2316 err = new_settings(hdev, sk);
2321 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2322 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2329 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2330 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2334 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2340 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2342 mgmt_pending_remove(cmd);
2347 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: validate BR/EDR and SSP capability, handle the
 * powered-off case by flipping HCI_SSP_ENABLED (clearing HCI_HS_ENABLED
 * when disabling SSP), short-circuit when already in the requested state,
 * and otherwise send HCI Write SSP Mode - first turning off debug keys
 * when disabling SSP with debug keys in use.
 * NOTE(review): braces, gotos and the lock call are missing from this
 * extraction.
 */
2351 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2353 struct mgmt_mode *cp = data;
2354 struct mgmt_pending_cmd *cmd;
2358 BT_DBG("request for %s", hdev->name);
2360 status = mgmt_bredr_support(hdev);
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2364 if (!lmp_ssp_capable(hdev))
2365 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2366 MGMT_STATUS_NOT_SUPPORTED);
2368 if (cp->val != 0x00 && cp->val != 0x01)
2369 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2370 MGMT_STATUS_INVALID_PARAMS);
2374 if (!hdev_is_powered(hdev)) {
2378 changed = !hci_dev_test_and_set_flag(hdev,
2381 changed = hci_dev_test_and_clear_flag(hdev,
2384 changed = hci_dev_test_and_clear_flag(hdev,
2387 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2390 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2395 err = new_settings(hdev, sk);
2400 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2401 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2406 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2407 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2411 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2417 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2418 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2419 sizeof(cp->val), &cp->val);
2421 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2423 mgmt_pending_remove(cmd);
2428 hci_dev_unlock(hdev);
2432 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2434 struct mgmt_mode *cp = data;
2439 BT_DBG("request for %s", hdev->name);
2441 status = mgmt_bredr_support(hdev);
2443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2445 if (!lmp_ssp_capable(hdev))
2446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2447 MGMT_STATUS_NOT_SUPPORTED);
2449 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2450 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2451 MGMT_STATUS_REJECTED);
2453 if (cp->val != 0x00 && cp->val != 0x01)
2454 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2455 MGMT_STATUS_INVALID_PARAMS);
2459 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2460 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2466 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2468 if (hdev_is_powered(hdev)) {
2469 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2470 MGMT_STATUS_REJECTED);
2474 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2477 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2482 err = new_settings(hdev, sk);
2485 hci_dev_unlock(hdev);
/* HCI-request completion for SET_LE: on error, fail all pending SET_LE
 * commands; on success, answer them with settings, broadcast New Settings,
 * and - only when LE actually got enabled - refresh advertising data,
 * scan response data and background scanning in a follow-up request.
 * NOTE(review): braces, the error branch structure and the lock call are
 * missing from this extraction.
 */
2489 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2491 struct cmd_lookup match = { NULL, hdev };
2496 u8 mgmt_err = mgmt_status(status);
2498 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2503 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2505 new_settings(hdev, match.sk);
2510 /* Make sure the controller has a good default for
2511 * advertising data. Restrict the update to when LE
2512 * has actually been enabled. During power on, the
2513 * update in powered_update_hci will take care of it.
2515 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2516 struct hci_request req;
2518 hci_req_init(&req, hdev);
2519 update_adv_data(&req);
2520 update_scan_rsp_data(&req);
2521 __hci_update_background_scan(&req);
2522 hci_req_run(&req, NULL);
2526 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: validate, forbid disabling LE on LE-only
 * configurations, drop all advertising instances when disabling, take the
 * flag-only path when powered off or when the host LE state already
 * matches, reject while SET_LE/SET_ADVERTISING is pending, and otherwise
 * send HCI Write LE Host Supported (disabling advertising first if it is
 * active). Completion is handled by le_enable_complete().
 * NOTE(review): locks, gotos, the val/enabled assignments and several
 * braces are missing from this extraction.
 */
2529 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2531 struct mgmt_mode *cp = data;
2532 struct hci_cp_write_le_host_supported hci_cp;
2533 struct mgmt_pending_cmd *cmd;
2534 struct hci_request req;
2538 BT_DBG("request for %s", hdev->name);
2540 if (!lmp_le_capable(hdev))
2541 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2542 MGMT_STATUS_NOT_SUPPORTED);
2544 if (cp->val != 0x00 && cp->val != 0x01)
2545 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2546 MGMT_STATUS_INVALID_PARAMS);
2548 /* Bluetooth single mode LE only controllers or dual-mode
2549 * controllers configured as LE only devices, do not allow
2550 * switching LE off. These have either LE enabled explicitly
2551 * or BR/EDR has been previously switched off.
2553 * When trying to enable an already enabled LE, then gracefully
2554 * send a positive response. Trying to disable it however will
2555 * result into rejection.
2557 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2558 if (cp->val == 0x01)
2559 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2561 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2562 MGMT_STATUS_REJECTED);
2568 enabled = lmp_host_le_capable(hdev);
2571 clear_adv_instance(hdev, NULL, 0x00, true);
2573 if (!hdev_is_powered(hdev) || val == enabled) {
2574 bool changed = false;
2576 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2577 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2581 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2582 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2586 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2591 err = new_settings(hdev, sk);
2596 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2597 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2603 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2609 hci_req_init(&req, hdev);
2611 memset(&hci_cp, 0, sizeof(hci_cp));
2615 hci_cp.simul = 0x00;
2617 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2618 disable_advertising(&req);
2621 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2624 err = hci_req_run(&req, le_enable_complete);
2626 mgmt_pending_remove(cmd);
2629 hci_dev_unlock(hdev);
2633 /* This is a helper function to test for pending mgmt commands that can
2634 * cause CoD or EIR HCI commands. We can only allow one such pending
2635 * mgmt command at a time since otherwise we cannot easily track what
2636 * the current values are, will be, and based on that calculate if a new
2637 * HCI command needs to be sent and if yes with what value.
/* Return whether any pending mgmt command could trigger CoD or EIR HCI
 * commands (ADD/REMOVE_UUID, SET_DEV_CLASS, SET_POWERED) - see the comment
 * block above. (The true/false returns are missing from this extraction.)
 */
2639 static bool pending_eir_or_class(struct hci_dev *hdev)
2641 struct mgmt_pending_cmd *cmd;
2643 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2644 switch (cmd->opcode) {
2645 case MGMT_OP_ADD_UUID:
2646 case MGMT_OP_REMOVE_UUID:
2647 case MGMT_OP_SET_DEV_CLASS:
2648 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * short-form UUIDs.
 */
2656 static const u8 bluetooth_base_uuid[] = {
2657 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2658 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID by size: if its first 12 bytes don't match the
 * Bluetooth Base UUID it is a true 128-bit UUID; otherwise inspect the
 * remaining 32-bit value to tell 16- from 32-bit short forms.
 * (The return statements are missing from this extraction.)
 */
2661 static u8 get_uuid_size(const u8 *uuid)
2665 if (memcmp(uuid, bluetooth_base_uuid, 12))
2668 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for class-of-device affecting commands: complete the
 * pending command (if any) with the mapped status and the current 3-byte
 * device class as reply payload.
 */
2675 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2677 struct mgmt_pending_cmd *cmd;
2681 cmd = pending_find(mgmt_op, hdev);
2685 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2686 mgmt_status(status), hdev->dev_class, 3);
2688 mgmt_pending_remove(cmd);
2691 hci_dev_unlock(hdev);
/* HCI-request completion for ADD_UUID - delegates to mgmt_class_complete. */
2694 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2696 BT_DBG("status 0x%02x", status);
2698 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: reject while another EIR/class command is
 * pending, allocate and append the UUID (with service hint and computed
 * size) to hdev->uuids, then run an HCI request to update the controller;
 * -ENODATA (nothing to send) completes immediately with the device class,
 * otherwise a pending command is added for add_uuid_complete().
 * NOTE(review): allocation failure handling, the class/EIR update calls
 * and gotos are missing from this extraction.
 */
2701 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2703 struct mgmt_cp_add_uuid *cp = data;
2704 struct mgmt_pending_cmd *cmd;
2705 struct hci_request req;
2706 struct bt_uuid *uuid;
2709 BT_DBG("request for %s", hdev->name);
2713 if (pending_eir_or_class(hdev)) {
2714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2719 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2725 memcpy(uuid->uuid, cp->uuid, 16);
2726 uuid->svc_hint = cp->svc_hint;
2727 uuid->size = get_uuid_size(cp->uuid);
2729 list_add_tail(&uuid->list, &hdev->uuids);
2731 hci_req_init(&req, hdev);
2736 err = hci_req_run(&req, add_uuid_complete);
2738 if (err != -ENODATA)
2741 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2742 hdev->dev_class, 3);
2746 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2755 hci_dev_unlock(hdev);
/* Arm the service cache on a powered device: if HCI_SERVICE_CACHE was not
 * already set, schedule the cache-off delayed work and report that caching
 * was (re)enabled. (Return statements are missing from this extraction.)
 */
2759 static bool enable_service_cache(struct hci_dev *hdev)
2761 if (!hdev_is_powered(hdev))
2764 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2765 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI-request completion for REMOVE_UUID - delegates to mgmt_class_complete. */
2773 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2775 BT_DBG("status 0x%02x", status);
2777 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: reject while another EIR/class command is
 * pending; an all-zero UUID clears the whole list (possibly deferring the
 * controller update via the service cache), otherwise remove every matching
 * entry and fail with INVALID_PARAMS when none matched. Then run an HCI
 * request to update the controller, mirroring add_uuid()'s -ENODATA and
 * pending-command handling.
 * NOTE(review): the found-counter, class/EIR update calls, frees and gotos
 * are missing from this extraction.
 */
2780 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2783 struct mgmt_cp_remove_uuid *cp = data;
2784 struct mgmt_pending_cmd *cmd;
2785 struct bt_uuid *match, *tmp;
2786 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2787 struct hci_request req;
2790 BT_DBG("request for %s", hdev->name);
2794 if (pending_eir_or_class(hdev)) {
2795 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2800 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2801 hci_uuids_clear(hdev);
2803 if (enable_service_cache(hdev)) {
2804 err = mgmt_cmd_complete(sk, hdev->id,
2805 MGMT_OP_REMOVE_UUID,
2806 0, hdev->dev_class, 3);
2815 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2816 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2819 list_del(&match->list);
2825 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2826 MGMT_STATUS_INVALID_PARAMS);
2831 hci_req_init(&req, hdev);
2836 err = hci_req_run(&req, remove_uuid_complete);
2838 if (err != -ENODATA)
2841 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2842 hdev->dev_class, 3);
2846 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2855 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Device Class: forward the HCI
 * status to the generic class-update completion handler.
 */
2859 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2861 BT_DBG("status 0x%02x", status);
2863 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class command handler: store the new major/minor class
 * and, on a powered controller, run an HCI request to write it (completed
 * by set_class_complete). BR/EDR only.
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
2866 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2869 struct mgmt_cp_set_dev_class *cp = data;
2870 struct mgmt_pending_cmd *cmd;
2871 struct hci_request req;
2874 BT_DBG("request for %s", hdev->name);
/* Device class only exists on BR/EDR capable controllers. */
2876 if (!lmp_bredr_capable(hdev))
2877 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2878 MGMT_STATUS_NOT_SUPPORTED);
/* Reject while another EIR/class update is still in flight. */
2882 if (pending_eir_or_class(hdev)) {
2883 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* The low two bits of minor and the high three bits of major are
 * reserved and must be zero. */
2888 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2889 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2890 MGMT_STATUS_INVALID_PARAMS);
2894 hdev->major_class = cp->major;
2895 hdev->minor_class = cp->minor;
/* Powered-off: nothing to write to the controller, reply now. */
2897 if (!hdev_is_powered(hdev)) {
2898 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2899 hdev->dev_class, 3);
2903 hci_req_init(&req, hdev);
/* Flush a pending service-cache timer first; the lock is dropped
 * because cancel_delayed_work_sync() may sleep waiting for the work. */
2905 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2906 hci_dev_unlock(hdev);
2907 cancel_delayed_work_sync(&hdev->service_cache);
2914 err = hci_req_run(&req, set_class_complete);
/* -ENODATA: no HCI commands queued, complete immediately. */
2916 if (err != -ENODATA)
2919 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2920 hdev->dev_class, 3);
2924 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2933 hci_dev_unlock(hdev);
/* MGMT Load Link Keys command handler: validate the variable-length key
 * list, replace the stored BR/EDR link keys, and update the debug-keys
 * policy flag.
 * NOTE(review): extract is truncated (locking, braces, some statements
 * missing).
 */
2937 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2940 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on key_count so expected_len below cannot overflow u16. */
2941 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2942 sizeof(struct mgmt_link_key_info));
2943 u16 key_count, expected_len;
2947 BT_DBG("request for %s", hdev->name);
2949 if (!lmp_bredr_capable(hdev))
2950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2951 MGMT_STATUS_NOT_SUPPORTED);
2953 key_count = __le16_to_cpu(cp->key_count);
2954 if (key_count > max_key_count) {
2955 BT_ERR("load_link_keys: too big key_count value %u",
2957 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2958 MGMT_STATUS_INVALID_PARAMS);
/* The wire length must exactly match header + key_count entries. */
2961 expected_len = sizeof(*cp) + key_count *
2962 sizeof(struct mgmt_link_key_info);
2963 if (expected_len != len) {
2964 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2966 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2967 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a strict boolean on the wire. */
2970 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2971 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2972 MGMT_STATUS_INVALID_PARAMS);
2974 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: every entry must be a BR/EDR address with a valid
 * link-key type (<= 0x08) before anything is modified. */
2977 for (i = 0; i < key_count; i++) {
2978 struct mgmt_link_key_info *key = &cp->keys[i];
2980 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2981 return mgmt_cmd_status(sk, hdev->id,
2982 MGMT_OP_LOAD_LINK_KEYS,
2983 MGMT_STATUS_INVALID_PARAMS);
/* Replace the key store wholesale. */
2988 hci_link_keys_clear(hdev);
/* Track whether the HCI_KEEP_DEBUG_KEYS flag actually changed so a
 * New Settings event is only emitted on a real transition. */
2991 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2993 changed = hci_dev_test_and_clear_flag(hdev,
2994 HCI_KEEP_DEBUG_KEYS);
2997 new_settings(hdev, NULL);
/* Second pass: add the keys. */
2999 for (i = 0; i < key_count; i++) {
3000 struct mgmt_link_key_info *key = &cp->keys[i];
3002 /* Always ignore debug keys and require a new pairing if
3003 * the user wants to use them.
3005 if (key->type == HCI_LK_DEBUG_COMBINATION)
3008 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
3009 key->type, key->pin_len, NULL);
3012 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
3014 hci_dev_unlock(hdev);
/* Emit the MGMT Device Unpaired event for the given address, skipping the
 * socket that initiated the unpair (it gets a command reply instead).
 */
3019 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
3020 u8 addr_type, struct sock *skip_sk)
3022 struct mgmt_ev_device_unpaired ev;
3024 bacpy(&ev.addr.bdaddr, bdaddr);
3025 ev.addr.type = addr_type;
3027 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device command handler: remove stored keys for the address
 * (link key for BR/EDR; LTK/IRK via SMP for LE), optionally terminate an
 * existing connection, and notify other mgmt sockets.
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
3031 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3034 struct mgmt_cp_unpair_device *cp = data;
3035 struct mgmt_rp_unpair_device rp;
3036 struct hci_conn_params *params;
3037 struct mgmt_pending_cmd *cmd;
3038 struct hci_conn *conn;
/* Pre-fill the reply with the target address for all exit paths. */
3042 memset(&rp, 0, sizeof(rp));
3043 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3044 rp.addr.type = cp->addr.type;
3046 if (!bdaddr_type_is_valid(cp->addr.type))
3047 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3048 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a strict boolean on the wire. */
3051 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3052 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3053 MGMT_STATUS_INVALID_PARAMS,
3058 if (!hdev_is_powered(hdev)) {
3059 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3060 MGMT_STATUS_NOT_POWERED, &rp,
/* BR/EDR: drop the link key; a failed removal means not paired. */
3065 if (cp->addr.type == BDADDR_BREDR) {
3066 /* If disconnection is requested, then look up the
3067 * connection. If the remote device is connected, it
3068 * will be later used to terminate the link.
3070 * Setting it to NULL explicitly will cause no
3071 * termination of the link.
3074 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3079 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3081 err = mgmt_cmd_complete(sk, hdev->id,
3082 MGMT_OP_UNPAIR_DEVICE,
3083 MGMT_STATUS_NOT_PAIRED, &rp,
3091 /* LE address type */
3092 addr_type = le_addr_type(cp->addr.type);
3094 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3095 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3097 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3098 MGMT_STATUS_NOT_PAIRED, &rp,
3103 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3105 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3110 /* Defer clearing up the connection parameters until closing to
3111 * give a chance of keeping them if a repairing happens.
3113 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3115 /* Disable auto-connection parameters if present */
3116 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3118 if (params->explicit_connect)
3119 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3121 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3124 /* If disconnection is not requested, then clear the connection
3125 * variable so that the link is not terminated.
3127 if (!cp->disconnect)
3131 /* If the connection variable is set, then termination of the
3132 * link is requested.
3135 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3137 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Connection must be terminated: defer the reply until the abort
 * completes, reported via addr_cmd_complete. */
3141 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3148 cmd->cmd_complete = addr_cmd_complete;
3150 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3152 mgmt_pending_remove(cmd);
3155 hci_dev_unlock(hdev);
/* MGMT Disconnect command handler: look up the BR/EDR or LE connection for
 * the given address and request link termination, deferring the reply
 * until the disconnect completes.
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
3159 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3162 struct mgmt_cp_disconnect *cp = data;
3163 struct mgmt_rp_disconnect rp;
3164 struct mgmt_pending_cmd *cmd;
3165 struct hci_conn *conn;
/* Pre-fill the reply with the target address for all exit paths. */
3170 memset(&rp, 0, sizeof(rp));
3171 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3172 rp.addr.type = cp->addr.type;
3174 if (!bdaddr_type_is_valid(cp->addr.type))
3175 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3176 MGMT_STATUS_INVALID_PARAMS,
3181 if (!test_bit(HCI_UP, &hdev->flags)) {
3182 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3183 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one Disconnect command may be outstanding at a time. */
3188 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3189 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3190 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Resolve the connection by transport type. */
3194 if (cp->addr.type == BDADDR_BREDR)
3195 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3198 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3199 le_addr_type(cp->addr.type));
/* BT_OPEN/BT_CLOSED are not usable link states for a disconnect. */
3201 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3202 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3203 MGMT_STATUS_NOT_CONNECTED, &rp,
3208 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3214 cmd->cmd_complete = generic_cmd_complete;
3216 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
/* On submission failure, drop the pending entry again. */
3218 mgmt_pending_remove(cmd);
3221 hci_dev_unlock(hdev);
/* Map an HCI link type + address type pair to the MGMT BDADDR_* address
 * type used on the management interface.
 * NOTE(review): the case labels (LE_LINK etc.) are missing from this
 * extract; only the LE-public / LE-random / BR/EDR fallthroughs remain.
 */
3225 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3227 switch (link_type) {
3229 switch (addr_type) {
3230 case ADDR_LE_DEV_PUBLIC:
3231 return BDADDR_LE_PUBLIC;
3234 /* Fallback to LE Random address type */
3235 return BDADDR_LE_RANDOM;
3239 /* Fallback to BR/EDR type */
3240 return BDADDR_BREDR;
/* MGMT Get Connections command handler: build a reply listing the address
 * of every mgmt-visible connection, excluding SCO/eSCO links.
 * NOTE(review): extract is truncated (declarations of c/i/rp_len/err,
 * locking, kfree and braces are missing).
 */
3244 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3247 struct mgmt_rp_get_connections *rp;
3257 if (!hdev_is_powered(hdev)) {
3258 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3259 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply buffer. */
3264 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3265 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3269 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3270 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in address entries, skipping SCO/eSCO links. */
3277 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3278 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3280 bacpy(&rp->addr[i].bdaddr, &c->dst);
3281 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3282 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3287 rp->conn_count = cpu_to_le16(i);
3289 /* Recalculate length in case of filtered SCO connections, etc */
3290 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3292 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3298 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Neg Reply mgmt command and send the matching
 * HCI negative reply for the given address; the pending entry is removed
 * again if the HCI send fails.
 */
3302 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3303 struct mgmt_cp_pin_code_neg_reply *cp)
3305 struct mgmt_pending_cmd *cmd;
3308 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
/* HCI negative reply carries only the peer bdaddr. */
3313 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3314 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3316 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply command handler: validate state and PIN length, then
 * forward the PIN to the controller via HCI_OP_PIN_CODE_REPLY.
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
3321 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3324 struct hci_conn *conn;
3325 struct mgmt_cp_pin_code_reply *cp = data;
3326 struct hci_cp_pin_code_reply reply;
3327 struct mgmt_pending_cmd *cmd;
3334 if (!hdev_is_powered(hdev)) {
3335 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3336 MGMT_STATUS_NOT_POWERED);
/* A PIN reply only makes sense for an existing ACL connection. */
3340 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3342 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3343 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; anything shorter is
 * converted into a negative reply toward the controller. */
3347 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3348 struct mgmt_cp_pin_code_neg_reply ncp;
3350 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3352 BT_ERR("PIN code is not 16 bytes long");
3354 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3356 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3357 MGMT_STATUS_INVALID_PARAMS);
3362 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3368 cmd->cmd_complete = addr_cmd_complete;
/* Build and send the HCI PIN Code Reply. */
3370 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3371 reply.pin_len = cp->pin_len;
3372 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3374 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3376 mgmt_pending_remove(cmd);
3379 hci_dev_unlock(hdev);
/* MGMT Set IO Capability command handler: validate and store the local IO
 * capability used for future pairings. Purely local; no HCI traffic.
 */
3383 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3386 struct mgmt_cp_set_io_capability *cp = data;
/* SMP_IO_KEYBOARD_DISPLAY is the highest defined IO capability. */
3390 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3391 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3392 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3396 hdev->io_capability = cp->io_capability;
3398 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3399 hdev->io_capability);
3401 hci_dev_unlock(hdev);
3403 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending Pair Device mgmt command associated with the given
 * connection (matched via cmd->user_data), or NULL if none exists.
 */
3407 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3409 struct hci_dev *hdev = conn->hdev;
3410 struct mgmt_pending_cmd *cmd;
3412 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3413 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3416 if (cmd->user_data != conn)
/* Complete a pending Pair Device command: send the reply with the peer
 * address and status, detach all pairing callbacks from the connection,
 * and drop the command's connection reference.
 */
3425 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3427 struct mgmt_rp_pair_device rp;
3428 struct hci_conn *conn = cmd->user_data;
3431 bacpy(&rp.addr.bdaddr, &conn->dst);
3432 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3434 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3435 status, &rp, sizeof(rp));
3437 /* So we don't get further callbacks for this connection */
3438 conn->connect_cfm_cb = NULL;
3439 conn->security_cfm_cb = NULL;
3440 conn->disconn_cfm_cb = NULL;
3442 hci_conn_drop(conn);
3444 /* The device is paired so there is no need to remove
3445 * its connection parameters anymore.
3447 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing finishes: translate the boolean
 * outcome into a mgmt status and complete any pending Pair Device command
 * for this connection.
 */
3454 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3456 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3457 struct mgmt_pending_cmd *cmd;
3459 cmd = find_pairing(conn);
3461 cmd->cmd_complete(cmd, status);
3462 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback: finish the pending Pair
 * Device command with the HCI status translated to a mgmt status.
 */
3466 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3468 struct mgmt_pending_cmd *cmd;
3470 BT_DBG("status %u", status);
3472 cmd = find_pairing(conn);
3474 BT_DBG("Unable to find a pending command");
3478 cmd->cmd_complete(cmd, mgmt_status(status));
3479 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb.
 * NOTE(review): the extra early-return condition that distinguishes this
 * from the BR/EDR callback (around original line 3487) is missing from
 * this extract — confirm against the full source.
 */
3482 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3484 struct mgmt_pending_cmd *cmd;
3486 BT_DBG("status %u", status);
3491 cmd = find_pairing(conn);
3493 BT_DBG("Unable to find a pending command");
3497 cmd->cmd_complete(cmd, mgmt_status(status));
3498 mgmt_pending_remove(cmd);
/* MGMT Pair Device command handler: initiate an ACL or LE connection to
 * the target, attach pairing callbacks, and defer the reply until the
 * pairing completes (via pairing_complete through the cfm callbacks).
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
3501 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3504 struct mgmt_cp_pair_device *cp = data;
3505 struct mgmt_rp_pair_device rp;
3506 struct mgmt_pending_cmd *cmd;
3507 u8 sec_level, auth_type;
3508 struct hci_conn *conn;
/* Pre-fill the reply with the target address for all exit paths. */
3513 memset(&rp, 0, sizeof(rp));
3514 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3515 rp.addr.type = cp->addr.type;
3517 if (!bdaddr_type_is_valid(cp->addr.type))
3518 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3519 MGMT_STATUS_INVALID_PARAMS,
3522 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3523 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3524 MGMT_STATUS_INVALID_PARAMS,
3529 if (!hdev_is_powered(hdev)) {
3530 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3531 MGMT_STATUS_NOT_POWERED, &rp,
3536 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3537 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3538 MGMT_STATUS_ALREADY_PAIRED, &rp,
3543 sec_level = BT_SECURITY_MEDIUM;
3544 auth_type = HCI_AT_DEDICATED_BONDING;
/* BR/EDR path: direct ACL connect; LE path: remember connection
 * parameters first, then connect via passive scanning. */
3546 if (cp->addr.type == BDADDR_BREDR) {
3547 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3550 u8 addr_type = le_addr_type(cp->addr.type);
3551 struct hci_conn_params *p;
3553 /* When pairing a new device, it is expected to remember
3554 * this device for future connections. Adding the connection
3555 * parameter information ahead of time allows tracking
3556 * of the slave preferred values and will speed up any
3557 * further connection establishment.
3559 * If connection parameters already exist, then they
3560 * will be kept and this function does nothing.
3562 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3564 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3565 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3567 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
3568 addr_type, sec_level,
3569 HCI_LE_CONN_TIMEOUT,
/* Map connection-attempt errno to a mgmt status. */
3576 if (PTR_ERR(conn) == -EBUSY)
3577 status = MGMT_STATUS_BUSY;
3578 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3579 status = MGMT_STATUS_NOT_SUPPORTED;
3580 else if (PTR_ERR(conn) == -ECONNREFUSED)
3581 status = MGMT_STATUS_REJECTED;
3583 status = MGMT_STATUS_CONNECT_FAILED;
3585 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3586 status, &rp, sizeof(rp));
/* A connection already carrying pairing callbacks means another
 * pairing is in progress on it. */
3590 if (conn->connect_cfm_cb) {
3591 hci_conn_drop(conn);
3592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3593 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3597 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3600 hci_conn_drop(conn);
3604 cmd->cmd_complete = pairing_complete;
3606 /* For LE, just connecting isn't a proof that the pairing finished */
3607 if (cp->addr.type == BDADDR_BREDR) {
3608 conn->connect_cfm_cb = pairing_complete_cb;
3609 conn->security_cfm_cb = pairing_complete_cb;
3610 conn->disconn_cfm_cb = pairing_complete_cb;
3612 conn->connect_cfm_cb = le_pairing_complete_cb;
3613 conn->security_cfm_cb = le_pairing_complete_cb;
3614 conn->disconn_cfm_cb = le_pairing_complete_cb;
3617 conn->io_capability = cp->io_cap;
/* The pending command holds a reference on the connection. */
3618 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: complete immediately. */
3620 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3621 hci_conn_security(conn, sec_level, auth_type, true)) {
3622 cmd->cmd_complete(cmd, 0);
3623 mgmt_pending_remove(cmd);
3629 hci_dev_unlock(hdev);
/* MGMT Cancel Pair Device command handler: find the outstanding Pair
 * Device command for the given address and complete it with CANCELLED.
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
3633 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3636 struct mgmt_addr_info *addr = data;
3637 struct mgmt_pending_cmd *cmd;
3638 struct hci_conn *conn;
3645 if (!hdev_is_powered(hdev)) {
3646 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3647 MGMT_STATUS_NOT_POWERED);
/* There must be a pairing in progress to cancel. */
3651 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3654 MGMT_STATUS_INVALID_PARAMS);
3658 conn = cmd->user_data;
/* The cancel request must name the same peer as the pairing. */
3660 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3661 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3662 MGMT_STATUS_INVALID_PARAMS);
/* Complete the pairing as cancelled, then ack the cancel itself. */
3666 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3667 mgmt_pending_remove(cmd);
3669 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3670 addr, sizeof(*addr));
3672 hci_dev_unlock(hdev);
/* Shared helper for all user pairing responses (PIN neg reply, user
 * confirm/passkey pos+neg replies): route LE responses to SMP directly,
 * and BR/EDR responses to the controller via the given HCI opcode.
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
3676 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3677 struct mgmt_addr_info *addr, u16 mgmt_op,
3678 u16 hci_op, __le32 passkey)
3680 struct mgmt_pending_cmd *cmd;
3681 struct hci_conn *conn;
3686 if (!hdev_is_powered(hdev)) {
3687 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3688 MGMT_STATUS_NOT_POWERED, addr,
/* Resolve the connection by transport type. */
3693 if (addr->type == BDADDR_BREDR)
3694 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3696 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3697 le_addr_type(addr->type));
3700 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3701 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP; reply now. */
3706 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3707 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3709 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3710 MGMT_STATUS_SUCCESS, addr,
3713 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3714 MGMT_STATUS_FAILED, addr,
/* BR/EDR path: queue the pending mgmt command, reply comes via
 * addr_cmd_complete once the HCI round trip finishes. */
3720 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3726 cmd->cmd_complete = addr_cmd_complete;
3728 /* Continue with pairing via HCI */
/* Passkey replies carry bdaddr + passkey; all other ops carry only
 * the bdaddr. */
3729 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3730 struct hci_cp_user_passkey_reply cp;
3732 bacpy(&cp.bdaddr, &addr->bdaddr);
3733 cp.passkey = passkey;
3734 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3736 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3740 mgmt_pending_remove(cmd);
3743 hci_dev_unlock(hdev);
/* MGMT PIN Code Neg Reply handler: thin wrapper delegating to
 * user_pairing_resp() with the PIN-code negative HCI opcode (no passkey).
 */
3747 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3748 void *data, u16 len)
3750 struct mgmt_cp_pin_code_neg_reply *cp = data;
3754 return user_pairing_resp(sk, hdev, &cp->addr,
3755 MGMT_OP_PIN_CODE_NEG_REPLY,
3756 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT User Confirm Reply handler: validate the fixed command size, then
 * delegate to user_pairing_resp() (no passkey).
 */
3759 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3762 struct mgmt_cp_user_confirm_reply *cp = data;
/* This command has no variable-length part; size must match exactly. */
3766 if (len != sizeof(*cp))
3767 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3768 MGMT_STATUS_INVALID_PARAMS);
3770 return user_pairing_resp(sk, hdev, &cp->addr,
3771 MGMT_OP_USER_CONFIRM_REPLY,
3772 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT User Confirm Neg Reply handler: thin wrapper delegating to
 * user_pairing_resp() with the negative HCI opcode (no passkey).
 */
3775 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3776 void *data, u16 len)
3778 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3782 return user_pairing_resp(sk, hdev, &cp->addr,
3783 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3784 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT User Passkey Reply handler: delegate to user_pairing_resp(),
 * forwarding the user-entered passkey.
 */
3787 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3790 struct mgmt_cp_user_passkey_reply *cp = data;
3794 return user_pairing_resp(sk, hdev, &cp->addr,
3795 MGMT_OP_USER_PASSKEY_REPLY,
3796 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT User Passkey Neg Reply handler: thin wrapper delegating to
 * user_pairing_resp() with the negative HCI opcode (no passkey).
 */
3799 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3800 void *data, u16 len)
3802 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3806 return user_pairing_resp(sk, hdev, &cp->addr,
3807 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3808 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto the
 * given request.
 */
3811 static void update_name(struct hci_request *req)
3813 struct hci_dev *hdev = req->hdev;
3814 struct hci_cp_write_local_name cp;
3816 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3818 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: complete the pending
 * mgmt command with the translated status (error as status, success with
 * the name payload).
 * NOTE(review): extract is truncated (lock, early return, braces missing).
 */
3821 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3823 struct mgmt_cp_set_local_name *cp;
3824 struct mgmt_pending_cmd *cmd;
3826 BT_DBG("status 0x%02x", status);
3830 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3837 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3838 mgmt_status(status));
3840 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3843 mgmt_pending_remove(cmd);
3846 hci_dev_unlock(hdev);
/* MGMT Set Local Name command handler: store the new long and short names
 * and, when powered, push them to the controller (and into LE scan
 * response data) via an HCI request completed by set_name_complete().
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
3849 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3852 struct mgmt_cp_set_local_name *cp = data;
3853 struct mgmt_pending_cmd *cmd;
3854 struct hci_request req;
3861 /* If the old values are the same as the new ones just return a
3862 * direct command complete event.
3864 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3865 !memcmp(hdev->short_name, cp->short_name,
3866 sizeof(hdev->short_name))) {
3867 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is only advisory; always store it immediately. */
3872 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered-off: just store the name and broadcast the change. */
3874 if (!hdev_is_powered(hdev)) {
3875 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3877 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3882 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3888 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3894 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3896 hci_req_init(&req, hdev);
3898 if (lmp_bredr_capable(hdev)) {
3903 /* The name is stored in the scan response data and so
3904 * no need to udpate the advertising data here.
3906 if (lmp_le_capable(hdev))
3907 update_scan_rsp_data(&req);
3909 err = hci_req_run(&req, set_name_complete);
3911 mgmt_pending_remove(cmd);
3914 hci_dev_unlock(hdev);
/* Completion callback for Read Local OOB Data: parse either the legacy
 * (P-192 only) or extended (P-192 + P-256) HCI response from skb and
 * complete the pending mgmt command with the hash/randomizer values.
 * NOTE(review): extract is truncated (lock, gotos, braces missing).
 */
3918 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3919 u16 opcode, struct sk_buff *skb)
3921 struct mgmt_rp_read_local_oob_data mgmt_rp;
3922 size_t rp_size = sizeof(mgmt_rp);
3923 struct mgmt_pending_cmd *cmd;
3925 BT_DBG("%s status %u", hdev->name, status);
3927 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
/* HCI-level failure or missing response buffer. */
3931 if (status || !skb) {
3932 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3933 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3937 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
/* Legacy response: only 192-bit hash/rand present. */
3939 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3940 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short skb before dereferencing the payload. */
3942 if (skb->len < sizeof(*rp)) {
3943 mgmt_cmd_status(cmd->sk, hdev->id,
3944 MGMT_OP_READ_LOCAL_OOB_DATA,
3945 MGMT_STATUS_FAILED);
3949 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3950 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Trim the mgmt reply: no 256-bit fields in the legacy case. */
3952 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
/* Extended response: both 192-bit and 256-bit values present. */
3954 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3956 if (skb->len < sizeof(*rp)) {
3957 mgmt_cmd_status(cmd->sk, hdev->id,
3958 MGMT_OP_READ_LOCAL_OOB_DATA,
3959 MGMT_STATUS_FAILED);
3963 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3964 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3966 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3967 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3970 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3971 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3974 mgmt_pending_remove(cmd);
/* MGMT Read Local OOB Data command handler: issue the legacy or extended
 * HCI read (depending on Secure Connections support) and defer the reply
 * to read_local_oob_data_complete().
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
3977 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3978 void *data, u16 data_len)
3980 struct mgmt_pending_cmd *cmd;
3981 struct hci_request req;
3984 BT_DBG("%s", hdev->name);
3988 if (!hdev_is_powered(hdev)) {
3989 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3990 MGMT_STATUS_NOT_POWERED);
/* OOB data requires Simple Pairing support. */
3994 if (!lmp_ssp_capable(hdev)) {
3995 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3996 MGMT_STATUS_NOT_SUPPORTED);
/* Only one read may be outstanding at a time. */
4000 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4006 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4012 hci_req_init(&req, hdev);
/* Secure Connections capable controllers use the extended read to
 * obtain the P-256 values as well. */
4014 if (bredr_sc_enabled(hdev))
4015 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4017 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4019 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4021 mgmt_pending_remove(cmd);
4024 hci_dev_unlock(hdev);
/* MGMT Add Remote OOB Data command handler: store a peer's OOB pairing
 * data. Two wire formats are accepted, distinguished by length: the legacy
 * P-192-only form (BR/EDR only) and the extended form carrying both P-192
 * and P-256 values; zeroed value pairs disable the corresponding set.
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
4028 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4029 void *data, u16 len)
4031 struct mgmt_addr_info *addr = data;
4034 BT_DBG("%s ", hdev->name);
4036 if (!bdaddr_type_is_valid(addr->type))
4037 return mgmt_cmd_complete(sk, hdev->id,
4038 MGMT_OP_ADD_REMOTE_OOB_DATA,
4039 MGMT_STATUS_INVALID_PARAMS,
4040 addr, sizeof(*addr));
/* Legacy form: P-192 hash/rand only, valid for BR/EDR addresses. */
4044 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4045 struct mgmt_cp_add_remote_oob_data *cp = data;
4048 if (cp->addr.type != BDADDR_BREDR) {
4049 err = mgmt_cmd_complete(sk, hdev->id,
4050 MGMT_OP_ADD_REMOTE_OOB_DATA,
4051 MGMT_STATUS_INVALID_PARAMS,
4052 &cp->addr, sizeof(cp->addr));
4056 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4057 cp->addr.type, cp->hash,
4058 cp->rand, NULL, NULL);
4060 status = MGMT_STATUS_FAILED;
4062 status = MGMT_STATUS_SUCCESS;
4064 err = mgmt_cmd_complete(sk, hdev->id,
4065 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4066 &cp->addr, sizeof(cp->addr));
/* Extended form: both P-192 and P-256 hash/rand pairs. */
4067 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4068 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4069 u8 *rand192, *hash192, *rand256, *hash256;
4072 if (bdaddr_type_is_le(cp->addr.type)) {
4073 /* Enforce zero-valued 192-bit parameters as
4074 * long as legacy SMP OOB isn't implemented.
4076 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4077 memcmp(cp->hash192, ZERO_KEY, 16)) {
4078 err = mgmt_cmd_complete(sk, hdev->id,
4079 MGMT_OP_ADD_REMOTE_OOB_DATA,
4080 MGMT_STATUS_INVALID_PARAMS,
4081 addr, sizeof(*addr));
4088 /* In case one of the P-192 values is set to zero,
4089 * then just disable OOB data for P-192.
4091 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4092 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4096 rand192 = cp->rand192;
4097 hash192 = cp->hash192;
4101 /* In case one of the P-256 values is set to zero, then just
4102 * disable OOB data for P-256.
4104 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4105 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4109 rand256 = cp->rand256;
4110 hash256 = cp->hash256;
4113 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4114 cp->addr.type, hash192, rand192,
4117 status = MGMT_STATUS_FAILED;
4119 status = MGMT_STATUS_SUCCESS;
4121 err = mgmt_cmd_complete(sk, hdev->id,
4122 MGMT_OP_ADD_REMOTE_OOB_DATA,
4123 status, &cp->addr, sizeof(cp->addr));
/* Any other length is malformed. */
4125 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
4126 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4127 MGMT_STATUS_INVALID_PARAMS);
4131 hci_dev_unlock(hdev);
/* MGMT Remove Remote OOB Data command handler: delete stored OOB data for
 * one BR/EDR address, or all entries when BDADDR_ANY is given.
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
4135 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4136 void *data, u16 len)
4138 struct mgmt_cp_remove_remote_oob_data *cp = data;
4142 BT_DBG("%s", hdev->name);
4144 if (cp->addr.type != BDADDR_BREDR)
4145 return mgmt_cmd_complete(sk, hdev->id,
4146 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4147 MGMT_STATUS_INVALID_PARAMS,
4148 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: clear all stored OOB data. */
4152 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4153 hci_remote_oob_data_clear(hdev);
4154 status = MGMT_STATUS_SUCCESS;
4158 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
/* A failed removal means no entry existed for that address. */
4160 status = MGMT_STATUS_INVALID_PARAMS;
4162 status = MGMT_STATUS_SUCCESS;
4165 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4166 status, &cp->addr, sizeof(cp->addr));
4168 hci_dev_unlock(hdev);
/* Queue a BR/EDR general inquiry onto the request. On failure writes a
 * mgmt status through *status and (presumably) returns false; returns are
 * missing from this extract.
 */
4172 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
4174 struct hci_dev *hdev = req->hdev;
4175 struct hci_cp_inquiry cp;
4176 /* General inquiry access code (GIAC) */
4177 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4179 *status = mgmt_bredr_support(hdev);
/* An inquiry is already running on the controller. */
4183 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
4184 *status = MGMT_STATUS_BUSY;
/* Start fresh: old cache entries would be reported as stale. */
4188 hci_inquiry_cache_flush(hdev);
4190 memset(&cp, 0, sizeof(cp));
4191 memcpy(&cp.lap, lap, sizeof(cp.lap));
4192 cp.length = DISCOV_BREDR_INQUIRY_LEN;
4194 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Queue an active LE scan (set parameters + enable) onto the request,
 * first stopping any conflicting advertising or background scan. Writes a
 * mgmt status through *status on failure.
 * NOTE(review): extract is truncated (early returns, braces missing).
 */
4199 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
4201 struct hci_dev *hdev = req->hdev;
4202 struct hci_cp_le_set_scan_param param_cp;
4203 struct hci_cp_le_set_scan_enable enable_cp;
4207 *status = mgmt_le_support(hdev);
4211 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
4212 /* Don't let discovery abort an outgoing connection attempt
4213 * that's using directed advertising.
4215 if (hci_lookup_le_connect(hdev)) {
4216 *status = MGMT_STATUS_REJECTED;
4220 cancel_adv_timeout(hdev);
4221 disable_advertising(req);
4224 /* If controller is scanning, it means the background scanning is
4225 * running. Thus, we should temporarily stop it in order to set the
4226 * discovery scanning parameters.
4228 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4229 hci_req_add_le_scan_disable(req);
4231 /* All active scans will be done with either a resolvable private
4232 * address (when privacy feature has been enabled) or non-resolvable
4235 err = hci_update_random_address(req, true, &own_addr_type);
4237 *status = MGMT_STATUS_FAILED;
/* Scan parameters: active scan at the caller-chosen interval. */
4241 memset(&param_cp, 0, sizeof(param_cp));
4242 param_cp.type = LE_SCAN_ACTIVE;
4243 param_cp.interval = cpu_to_le16(interval);
4244 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4245 param_cp.own_address_type = own_addr_type;
4247 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
/* Enable the scan with controller-side duplicate filtering. */
4250 memset(&enable_cp, 0, sizeof(enable_cp));
4251 enable_cp.enable = LE_SCAN_ENABLE;
4252 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4254 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Dispatch on hdev->discovery.type and queue the matching discovery
 * procedure (BR/EDR inquiry, LE scan, or both for interleaved). Writes a
 * mgmt status through *status on failure.
 * NOTE(review): extract is truncated (break/return statements, braces
 * missing).
 */
4260 static bool trigger_discovery(struct hci_request *req, u8 *status)
4262 struct hci_dev *hdev = req->hdev;
4264 switch (hdev->discovery.type) {
4265 case DISCOV_TYPE_BREDR:
4266 if (!trigger_bredr_inquiry(req, status))
4270 case DISCOV_TYPE_INTERLEAVED:
/* Controllers that can run inquiry and LE scan simultaneously
 * start both at once, with a doubled LE scan interval. */
4271 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4273 /* During simultaneous discovery, we double LE scan
4274 * interval. We must leave some time for the controller
4275 * to do BR/EDR inquiry.
4277 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4281 if (!trigger_bredr_inquiry(req, status))
/* Non-simultaneous interleaving still requires BR/EDR support. */
4287 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4288 *status = MGMT_STATUS_NOT_SUPPORTED;
4293 case DISCOV_TYPE_LE:
4294 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
/* Unknown discovery type. */
4299 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback for Start (Service) Discovery: complete
 * the pending mgmt command, move the discovery state machine, and arm the
 * le_scan_disable timer with a timeout appropriate to the discovery type.
 * NOTE(review): extract is truncated (locking, gotos, braces missing).
 */
4306 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4309 struct mgmt_pending_cmd *cmd;
4310 unsigned long timeout;
4312 BT_DBG("status %d", status);
/* Either plain or service discovery may have started this request. */
4316 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4318 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4321 cmd->cmd_complete(cmd, mgmt_status(status));
4322 mgmt_pending_remove(cmd);
/* On failure the discovery never started. */
4326 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4330 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4332 /* If the scan involves LE scan, pick proper timeout to schedule
4333 * hdev->le_scan_disable that will stop it.
4335 switch (hdev->discovery.type) {
4336 case DISCOV_TYPE_LE:
4337 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4339 case DISCOV_TYPE_INTERLEAVED:
4340 /* When running simultaneous discovery, the LE scanning time
4341 * should occupy the whole discovery time sine BR/EDR inquiry
4342 * and LE scanning are scheduled by the controller.
4344 * For interleaving discovery in comparison, BR/EDR inquiry
4345 * and LE scanning are done sequentially with separate
4348 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4349 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4351 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4353 case DISCOV_TYPE_BREDR:
4357 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4363 /* When service discovery is used and the controller has
4364 * a strict duplicate filter, it is important to remember
4365 * the start and duration of the scan. This is required
4366 * for restarting scanning during the discovery phase.
4368 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4370 hdev->discovery.result_filtering) {
4371 hdev->discovery.scan_start = jiffies;
4372 hdev->discovery.scan_duration = timeout;
4375 queue_delayed_work(hdev->workqueue,
4376 &hdev->le_scan_disable, timeout);
4380 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler.  Validates device/state, registers a
 * pending command, resets the discovery filter, and runs the HCI request
 * built by trigger_discovery().  NOTE(review): listing elides some lines
 * (locking, error gotos, closing braces).
 */
4383 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4384 void *data, u16 len)
4386 struct mgmt_cp_start_discovery *cp = data;
4387 struct mgmt_pending_cmd *cmd;
4388 struct hci_request req;
4392 BT_DBG("%s", hdev->name);
/* Reject when the adapter is powered off. */
4396 if (!hdev_is_powered(hdev)) {
4397 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4398 MGMT_STATUS_NOT_POWERED,
4399 &cp->type, sizeof(cp->type));
/* Only one discovery session at a time; periodic inquiry also blocks. */
4403 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4404 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4405 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4406 MGMT_STATUS_BUSY, &cp->type,
4411 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4417 cmd->cmd_complete = generic_cmd_complete;
4419 /* Clear the discovery filter first to free any previously
4420 * allocated memory for the UUID list.
4422 hci_discovery_filter_clear(hdev);
4424 hdev->discovery.type = cp->type;
4425 hdev->discovery.report_invalid_rssi = false;
4427 hci_req_init(&req, hdev);
4429 if (!trigger_discovery(&req, &status)) {
4430 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4431 status, &cp->type, sizeof(cp->type));
4432 mgmt_pending_remove(cmd);
4436 err = hci_req_run(&req, start_discovery_complete);
4438 mgmt_pending_remove(cmd);
4442 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4445 hci_dev_unlock(hdev);
/* cmd_complete callback for Start Service Discovery: echo the command
 * parameters back in the response.  NOTE(review): listing elides the
 * parameter list continuation and body lines between 4449 and 4452.
 */
4449 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4452 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler.  Like start_discovery() but
 * additionally validates and stores an RSSI threshold and a UUID filter
 * list supplied by user space.  NOTE(review): listing elides some lines
 * (locking, gotos, braces).
 */
4456 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4457 void *data, u16 len)
4459 struct mgmt_cp_start_service_discovery *cp = data;
4460 struct mgmt_pending_cmd *cmd;
4461 struct hci_request req;
/* Upper bound on UUID count so expected_len below cannot overflow u16. */
4462 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4463 u16 uuid_count, expected_len;
4467 BT_DBG("%s", hdev->name);
4471 if (!hdev_is_powered(hdev)) {
4472 err = mgmt_cmd_complete(sk, hdev->id,
4473 MGMT_OP_START_SERVICE_DISCOVERY,
4474 MGMT_STATUS_NOT_POWERED,
4475 &cp->type, sizeof(cp->type));
4479 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4480 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4481 err = mgmt_cmd_complete(sk, hdev->id,
4482 MGMT_OP_START_SERVICE_DISCOVERY,
4483 MGMT_STATUS_BUSY, &cp->type,
4488 uuid_count = __le16_to_cpu(cp->uuid_count);
4489 if (uuid_count > max_uuid_count) {
4490 BT_ERR("service_discovery: too big uuid_count value %u",
4492 err = mgmt_cmd_complete(sk, hdev->id,
4493 MGMT_OP_START_SERVICE_DISCOVERY,
4494 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Each UUID entry is 16 bytes; total length must match exactly. */
4499 expected_len = sizeof(*cp) + uuid_count * 16;
4500 if (expected_len != len) {
4501 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4503 err = mgmt_cmd_complete(sk, hdev->id,
4504 MGMT_OP_START_SERVICE_DISCOVERY,
4505 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4510 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4517 cmd->cmd_complete = service_discovery_cmd_complete;
4519 /* Clear the discovery filter first to free any previously
4520 * allocated memory for the UUID list.
4522 hci_discovery_filter_clear(hdev);
4524 hdev->discovery.result_filtering = true;
4525 hdev->discovery.type = cp->type;
4526 hdev->discovery.rssi = cp->rssi;
4527 hdev->discovery.uuid_count = uuid_count;
4529 if (uuid_count > 0) {
/* Copy the UUID filter list; freed via hci_discovery_filter_clear(). */
4530 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4532 if (!hdev->discovery.uuids) {
4533 err = mgmt_cmd_complete(sk, hdev->id,
4534 MGMT_OP_START_SERVICE_DISCOVERY,
4536 &cp->type, sizeof(cp->type));
4537 mgmt_pending_remove(cmd);
4542 hci_req_init(&req, hdev);
4544 if (!trigger_discovery(&req, &status)) {
4545 err = mgmt_cmd_complete(sk, hdev->id,
4546 MGMT_OP_START_SERVICE_DISCOVERY,
4547 status, &cp->type, sizeof(cp->type));
4548 mgmt_pending_remove(cmd);
4552 err = hci_req_run(&req, start_discovery_complete);
4554 mgmt_pending_remove(cmd);
4558 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4561 hci_dev_unlock(hdev);
/* HCI request completion callback for Stop Discovery: answer the pending
 * mgmt command and mark discovery stopped.  NOTE(review): listing elides
 * some lines (locking, status check, braces).
 */
4565 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4567 struct mgmt_pending_cmd *cmd;
4569 BT_DBG("status %d", status);
4573 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4575 cmd->cmd_complete(cmd, mgmt_status(status));
4576 mgmt_pending_remove(cmd);
4580 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4582 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler.  Validates that discovery of the given
 * type is active, then queues hci_stop_discovery().  NOTE(review): listing
 * elides some lines (locking, gotos, braces).
 */
4585 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4588 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4589 struct mgmt_pending_cmd *cmd;
4590 struct hci_request req;
4593 BT_DBG("%s", hdev->name);
4597 if (!hci_discovery_active(hdev)) {
4598 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4599 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4600 sizeof(mgmt_cp->type));
/* The requested type must match the type that was started. */
4604 if (hdev->discovery.type != mgmt_cp->type) {
4605 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4606 MGMT_STATUS_INVALID_PARAMS,
4607 &mgmt_cp->type, sizeof(mgmt_cp->type));
4611 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4617 cmd->cmd_complete = generic_cmd_complete;
4619 hci_req_init(&req, hdev);
4621 hci_stop_discovery(&req);
4623 err = hci_req_run(&req, stop_discovery_complete);
4625 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4629 mgmt_pending_remove(cmd);
4631 /* If no HCI commands were sent we're done */
4632 if (err == -ENODATA) {
4633 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4634 &mgmt_cp->type, sizeof(mgmt_cp->type));
4635 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4639 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler.  Marks an inquiry-cache entry's name as
 * known or needing resolution during active discovery.  NOTE(review):
 * listing elides some lines (locking, gotos, braces).
 */
4643 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4646 struct mgmt_cp_confirm_name *cp = data;
4647 struct inquiry_entry *e;
4650 BT_DBG("%s", hdev->name);
4654 if (!hci_discovery_active(hdev)) {
4655 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4656 MGMT_STATUS_FAILED, &cp->addr,
/* Look up the address among entries whose name is still unknown. */
4661 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4663 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4664 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4669 if (cp->name_known) {
4670 e->name_state = NAME_KNOWN;
4673 e->name_state = NAME_NEEDED;
/* Reposition the entry in the name-resolve list. */
4674 hci_inquiry_cache_update_resolve(hdev, e);
4677 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4678 &cp->addr, sizeof(cp->addr));
4681 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler.  Adds an address to the blacklist and
 * emits MGMT_EV_DEVICE_BLOCKED on success.  NOTE(review): listing elides
 * some lines (locking, braces, error branch).
 */
4685 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4688 struct mgmt_cp_block_device *cp = data;
4692 BT_DBG("%s", hdev->name);
4694 if (!bdaddr_type_is_valid(cp->addr.type))
4695 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4696 MGMT_STATUS_INVALID_PARAMS,
4697 &cp->addr, sizeof(cp->addr));
4701 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4704 status = MGMT_STATUS_FAILED;
/* Notify other mgmt sockets that the device is now blocked. */
4708 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4710 status = MGMT_STATUS_SUCCESS;
4713 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4714 &cp->addr, sizeof(cp->addr));
4716 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler.  Removes an address from the blacklist
 * and emits MGMT_EV_DEVICE_UNBLOCKED on success.  NOTE(review): listing
 * elides some lines (locking, braces, error branch).
 */
4721 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4724 struct mgmt_cp_unblock_device *cp = data;
4728 BT_DBG("%s", hdev->name);
4730 if (!bdaddr_type_is_valid(cp->addr.type))
4731 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4732 MGMT_STATUS_INVALID_PARAMS,
4733 &cp->addr, sizeof(cp->addr));
4737 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
/* Entry not found: treat as invalid parameters. */
4740 status = MGMT_STATUS_INVALID_PARAMS;
4744 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4746 status = MGMT_STATUS_SUCCESS;
4749 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4750 &cp->addr, sizeof(cp->addr));
4752 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler.  Stores the Device ID (source, vendor,
 * product, version) and kicks an HCI request (presumably to refresh the
 * EIR record -- TODO confirm; the queued commands are on elided lines).
 */
4757 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4760 struct mgmt_cp_set_device_id *cp = data;
4761 struct hci_request req;
4765 BT_DBG("%s", hdev->name);
4767 source = __le16_to_cpu(cp->source);
/* Valid sources: 0x0000 (disabled), 0x0001 (SIG), 0x0002 (USB IF). */
4769 if (source > 0x0002)
4770 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4771 MGMT_STATUS_INVALID_PARAMS);
4775 hdev->devid_source = source;
4776 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4777 hdev->devid_product = __le16_to_cpu(cp->product);
4778 hdev->devid_version = __le16_to_cpu(cp->version);
4780 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4783 hci_req_init(&req, hdev);
4785 hci_req_run(&req, NULL);
4787 hci_dev_unlock(hdev);
/* Completion callback used when re-enabling instance advertising from
 * set_advertising_complete(); only logs the HCI status.
 */
4792 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4795 BT_DBG("status %d", status);
/* HCI request completion callback for Set Advertising.  Syncs the
 * HCI_ADVERTISING flag with the controller state, answers all pending
 * commands, and re-enables multi-instance advertising if Set Advertising
 * was just turned off.  NOTE(review): listing elides some lines
 * (locking, gotos, braces).
 */
4798 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4801 struct cmd_lookup match = { NULL, hdev };
4802 struct hci_request req;
4804 struct adv_info *adv_instance;
/* On failure, report the error to every pending SET_ADVERTISING cmd. */
4810 u8 mgmt_err = mgmt_status(status);
4812 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4813 cmd_status_rsp, &mgmt_err);
4817 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4818 hci_dev_set_flag(hdev, HCI_ADVERTISING);
4820 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4822 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4825 new_settings(hdev, match.sk);
4830 /* If "Set Advertising" was just disabled and instance advertising was
4831 * set up earlier, then re-enable multi-instance advertising.
4833 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4834 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) ||
4835 list_empty(&hdev->adv_instances))
4838 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
4840 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
4841 struct adv_info, list);
4845 instance = adv_instance->instance;
4848 hci_req_init(&req, hdev);
4850 err = schedule_adv_instance(&req, instance, true);
4853 err = hci_req_run(&req, enable_advertising_instance);
4856 BT_ERR("Failed to re-configure advertising");
4859 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler.  val 0x00 = off, 0x01 = on,
 * 0x02 = connectable advertising.  Either toggles flags directly (when
 * no HCI traffic is needed) or queues enable/disable commands.
 * NOTE(review): listing elides some lines (locking, gotos, braces).
 */
4862 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4865 struct mgmt_mode *cp = data;
4866 struct mgmt_pending_cmd *cmd;
4867 struct hci_request req;
4871 BT_DBG("request for %s", hdev->name);
4873 status = mgmt_le_support(hdev);
4875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4878 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4879 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4880 MGMT_STATUS_INVALID_PARAMS);
4886 /* The following conditions are ones which mean that we should
4887 * not do any HCI communication but directly send a mgmt
4888 * response to user space (after toggling the flag if
4891 if (!hdev_is_powered(hdev) ||
4892 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4893 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4894 hci_conn_num(hdev, LE_LINK) > 0 ||
4895 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4896 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4900 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4901 if (cp->val == 0x02)
4902 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4904 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4906 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4907 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4910 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Broadcast new settings only when a flag actually changed. */
4915 err = new_settings(hdev, sk);
4920 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4921 pending_find(MGMT_OP_SET_LE, hdev)) {
4922 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4927 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4933 hci_req_init(&req, hdev);
4935 if (cp->val == 0x02)
4936 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4938 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4940 cancel_adv_timeout(hdev);
4943 /* Switch to instance "0" for the Set Advertising setting.
4944 * We cannot use update_[adv|scan_rsp]_data() here as the
4945 * HCI_ADVERTISING flag is not yet set.
4947 update_inst_adv_data(&req, 0x00);
4948 update_inst_scan_rsp_data(&req, 0x00);
4949 enable_advertising(&req);
4951 disable_advertising(&req);
4954 err = hci_req_run(&req, set_advertising_complete);
4956 mgmt_pending_remove(cmd);
4959 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler.  Only allowed while powered off;
 * accepts BDADDR_ANY (clear) or a valid LE static random address.
 * NOTE(review): listing elides some lines (locking, braces).
 */
4963 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4964 void *data, u16 len)
4966 struct mgmt_cp_set_static_address *cp = data;
4969 BT_DBG("%s", hdev->name);
4971 if (!lmp_le_capable(hdev))
4972 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4973 MGMT_STATUS_NOT_SUPPORTED);
4975 if (hdev_is_powered(hdev))
4976 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4977 MGMT_STATUS_REJECTED);
4979 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
/* BDADDR_NONE (ff:ff:ff:ff:ff:ff) is never a valid static address. */
4980 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4981 return mgmt_cmd_status(sk, hdev->id,
4982 MGMT_OP_SET_STATIC_ADDRESS,
4983 MGMT_STATUS_INVALID_PARAMS);
4985 /* Two most significant bits shall be set */
4986 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4987 return mgmt_cmd_status(sk, hdev->id,
4988 MGMT_OP_SET_STATIC_ADDRESS,
4989 MGMT_STATUS_INVALID_PARAMS);
4994 bacpy(&hdev->static_addr, &cp->bdaddr);
4996 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5000 err = new_settings(hdev, sk);
5003 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler.  Validates LE scan interval/window
 * (0x0004-0x4000, window <= interval per the HCI spec), stores them, and
 * restarts passive background scanning so the new values take effect.
 * NOTE(review): listing elides some lines (locking, braces).
 */
5007 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5008 void *data, u16 len)
5010 struct mgmt_cp_set_scan_params *cp = data;
5011 __u16 interval, window;
5014 BT_DBG("%s", hdev->name);
5016 if (!lmp_le_capable(hdev))
5017 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5018 MGMT_STATUS_NOT_SUPPORTED);
5020 interval = __le16_to_cpu(cp->interval);
5022 if (interval < 0x0004 || interval > 0x4000)
5023 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5024 MGMT_STATUS_INVALID_PARAMS);
5026 window = __le16_to_cpu(cp->window);
5028 if (window < 0x0004 || window > 0x4000)
5029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5030 MGMT_STATUS_INVALID_PARAMS);
5032 if (window > interval)
5033 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5034 MGMT_STATUS_INVALID_PARAMS);
5038 hdev->le_scan_interval = interval;
5039 hdev->le_scan_window = window;
5041 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5044 /* If background scan is running, restart it so new parameters are
5047 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5048 hdev->discovery.state == DISCOVERY_STOPPED) {
5049 struct hci_request req;
5051 hci_req_init(&req, hdev);
/* Disable then re-enable passive scan to apply the new values. */
5053 hci_req_add_le_scan_disable(&req);
5054 hci_req_add_le_passive_scan(&req);
5056 hci_req_run(&req, NULL);
5059 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable: on success,
 * update HCI_FAST_CONNECTABLE and notify user space.  NOTE(review):
 * listing elides some lines (locking, gotos, braces).
 */
5064 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5067 struct mgmt_pending_cmd *cmd;
5069 BT_DBG("status 0x%02x", status);
5073 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5078 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5079 mgmt_status(status));
5081 struct mgmt_mode *cp = cmd->param;
5084 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5086 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5088 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5089 new_settings(hdev, cmd->sk);
5092 mgmt_pending_remove(cmd);
5095 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler.  Requires BR/EDR and BT >= 1.2
 * (interlaced page scan).  Toggles the flag directly when powered off,
 * otherwise writes the page-scan parameters via an HCI request.
 * NOTE(review): listing elides some lines (locking, gotos, braces).
 */
5098 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5099 void *data, u16 len)
5101 struct mgmt_mode *cp = data;
5102 struct mgmt_pending_cmd *cmd;
5103 struct hci_request req;
5106 BT_DBG("%s", hdev->name);
5108 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5109 hdev->hci_ver < BLUETOOTH_VER_1_2)
5110 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5111 MGMT_STATUS_NOT_SUPPORTED);
5113 if (cp->val != 0x00 && cp->val != 0x01)
5114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5115 MGMT_STATUS_INVALID_PARAMS);
5119 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No change requested: just echo the current settings. */
5125 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5126 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5131 if (!hdev_is_powered(hdev)) {
5132 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5133 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5135 new_settings(hdev, sk);
5139 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5146 hci_req_init(&req, hdev);
5148 write_fast_connectable(&req, cp->val);
5150 err = hci_req_run(&req, fast_connectable_complete);
5152 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5153 MGMT_STATUS_FAILED);
5154 mgmt_pending_remove(cmd);
5158 hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR.  On HCI failure the
 * HCI_BREDR_ENABLED flag (set optimistically in set_bredr()) is rolled
 * back.  NOTE(review): listing elides some lines (locking, braces).
 */
5163 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5165 struct mgmt_pending_cmd *cmd;
5167 BT_DBG("status 0x%02x", status);
5171 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5176 u8 mgmt_err = mgmt_status(status);
5178 /* We need to restore the flag if related HCI commands
5181 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5183 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5185 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5186 new_settings(hdev, cmd->sk);
5189 mgmt_pending_remove(cmd);
5192 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler (dual-mode controllers only).  Enabling
 * requires LE to stay enabled; re-enabling BR/EDR is rejected when a
 * static address or secure connections is in use.  NOTE(review): listing
 * elides some lines (locking, gotos, braces).
 */
5195 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5197 struct mgmt_mode *cp = data;
5198 struct mgmt_pending_cmd *cmd;
5199 struct hci_request req;
5202 BT_DBG("request for %s", hdev->name);
5204 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5205 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5206 MGMT_STATUS_NOT_SUPPORTED);
5208 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5210 MGMT_STATUS_REJECTED);
5212 if (cp->val != 0x00 && cp->val != 0x01)
5213 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5214 MGMT_STATUS_INVALID_PARAMS);
5218 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5219 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5223 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR while off clears all BR/EDR-only settings. */
5225 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5226 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5227 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5228 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5229 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5232 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5234 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5238 err = new_settings(hdev, sk);
5242 /* Reject disabling when powered on */
5244 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5245 MGMT_STATUS_REJECTED);
5248 /* When configuring a dual-mode controller to operate
5249 * with LE only and using a static address, then switching
5250 * BR/EDR back on is not allowed.
5252 * Dual-mode controllers shall operate with the public
5253 * address as its identity address for BR/EDR and LE. So
5254 * reject the attempt to create an invalid configuration.
5256 * The same restrictions applies when secure connections
5257 * has been enabled. For BR/EDR this is a controller feature
5258 * while for LE it is a host stack feature. This means that
5259 * switching BR/EDR back on when secure connections has been
5260 * enabled is not a supported transaction.
5262 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5263 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5264 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5265 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5266 MGMT_STATUS_REJECTED);
5271 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5272 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5277 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5283 /* We need to flip the bit already here so that update_adv_data
5284 * generates the correct flags.
5286 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5288 hci_req_init(&req, hdev);
5290 write_fast_connectable(&req, false);
5291 __hci_update_page_scan(&req);
5293 /* Since only the advertising data flags will change, there
5294 * is no need to update the scan response data.
5296 update_adv_data(&req);
5298 err = hci_req_run(&req, set_bredr_complete);
5300 mgmt_pending_remove(cmd);
5303 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Secure Connections.  On success
 * the SC_ENABLED/SC_ONLY flags are set according to the requested mode
 * (0x00 off, 0x01 on, 0x02 SC-only; the switch on cp->val is on elided
 * lines).  NOTE(review): listing elides some lines (locking, braces).
 */
5307 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5309 struct mgmt_pending_cmd *cmd;
5310 struct mgmt_mode *cp;
5312 BT_DBG("%s status %u", hdev->name, status);
5316 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5321 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5322 mgmt_status(status));
5330 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5331 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5334 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5335 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5338 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5339 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5343 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5344 new_settings(hdev, cmd->sk);
5347 mgmt_pending_remove(cmd);
5349 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler.  val 0x00 = off, 0x01 = on,
 * 0x02 = SC-only mode.  Toggles flags directly when no controller write
 * is needed, otherwise sends HCI Write Secure Connections Host Support.
 * NOTE(review): listing elides some lines (locking, gotos, braces).
 */
5352 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5353 void *data, u16 len)
5355 struct mgmt_mode *cp = data;
5356 struct mgmt_pending_cmd *cmd;
5357 struct hci_request req;
5361 BT_DBG("request for %s", hdev->name);
5363 if (!lmp_sc_capable(hdev) &&
5364 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5365 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5366 MGMT_STATUS_NOT_SUPPORTED);
/* SC on BR/EDR requires SSP to be enabled first. */
5368 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5369 lmp_sc_capable(hdev) &&
5370 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5372 MGMT_STATUS_REJECTED);
5374 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5376 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no controller support needed or powered off. */
5380 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5381 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5385 changed = !hci_dev_test_and_set_flag(hdev,
5387 if (cp->val == 0x02)
5388 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5390 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5392 changed = hci_dev_test_and_clear_flag(hdev,
5394 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5397 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5402 err = new_settings(hdev, sk);
5407 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5408 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state: just echo the settings. */
5415 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5416 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5417 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5421 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5427 hci_req_init(&req, hdev);
5428 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5429 err = hci_req_run(&req, sc_enable_complete);
5431 mgmt_pending_remove(cmd);
5436 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.  val 0x00 = off, 0x01 = keep debug
 * keys, 0x02 = also generate debug keys (SSP debug mode).  Pushes SSP
 * debug mode to the controller when powered and SSP is on.
 * NOTE(review): listing elides some lines (locking, braces).
 */
5440 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5441 void *data, u16 len)
5443 struct mgmt_mode *cp = data;
5444 bool changed, use_changed;
5447 BT_DBG("request for %s", hdev->name);
5449 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5450 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5451 MGMT_STATUS_INVALID_PARAMS);
5456 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5458 changed = hci_dev_test_and_clear_flag(hdev,
5459 HCI_KEEP_DEBUG_KEYS);
/* 0x02 additionally turns on use of debug keys for pairing. */
5461 if (cp->val == 0x02)
5462 use_changed = !hci_dev_test_and_set_flag(hdev,
5463 HCI_USE_DEBUG_KEYS);
5465 use_changed = hci_dev_test_and_clear_flag(hdev,
5466 HCI_USE_DEBUG_KEYS);
5468 if (hdev_is_powered(hdev) && use_changed &&
5469 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5470 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5471 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5472 sizeof(mode), &mode);
5475 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5480 err = new_settings(hdev, sk);
5483 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler.  Only allowed while powered off; stores
 * or clears the local IRK and toggles HCI_PRIVACY/HCI_RPA_EXPIRED.
 * NOTE(review): listing elides some lines (locking, braces).
 */
5487 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5490 struct mgmt_cp_set_privacy *cp = cp_data;
5494 BT_DBG("request for %s", hdev->name);
5496 if (!lmp_le_capable(hdev))
5497 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5498 MGMT_STATUS_NOT_SUPPORTED);
5500 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5501 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5502 MGMT_STATUS_INVALID_PARAMS);
5504 if (hdev_is_powered(hdev))
5505 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5506 MGMT_STATUS_REJECTED);
5510 /* If user space supports this command it is also expected to
5511 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5513 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5516 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5517 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a fresh RPA on next use. */
5518 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5520 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5521 memset(hdev->irk, 0, sizeof(hdev->irk));
5522 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5525 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5530 err = new_settings(hdev, sk);
5533 hci_dev_unlock(hdev);
/* Validate an IRK entry's address type: LE public is always valid; LE
 * random must be a static address (top two bits set).  NOTE(review):
 * listing elides the return statements between the cases.
 */
5537 static bool irk_is_valid(struct mgmt_irk_info *irk)
5539 switch (irk->addr.type) {
5540 case BDADDR_LE_PUBLIC:
5543 case BDADDR_LE_RANDOM:
5544 /* Two most significant bits shall be set */
5545 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler.  Validates count/length and each entry,
 * then replaces the SMP IRK store wholesale.  NOTE(review): listing
 * elides some lines (locking, braces).
 */
5553 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5556 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound so expected_len below cannot overflow u16. */
5557 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5558 sizeof(struct mgmt_irk_info));
5559 u16 irk_count, expected_len;
5562 BT_DBG("request for %s", hdev->name);
5564 if (!lmp_le_capable(hdev))
5565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5566 MGMT_STATUS_NOT_SUPPORTED);
5568 irk_count = __le16_to_cpu(cp->irk_count);
5569 if (irk_count > max_irk_count) {
5570 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5571 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5572 MGMT_STATUS_INVALID_PARAMS);
5575 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5576 if (expected_len != len) {
5577 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5580 MGMT_STATUS_INVALID_PARAMS);
5583 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Reject the whole load if any single entry is invalid. */
5585 for (i = 0; i < irk_count; i++) {
5586 struct mgmt_irk_info *key = &cp->irks[i];
5588 if (!irk_is_valid(key))
5589 return mgmt_cmd_status(sk, hdev->id,
5591 MGMT_STATUS_INVALID_PARAMS);
5596 hci_smp_irks_clear(hdev);
5598 for (i = 0; i < irk_count; i++) {
5599 struct mgmt_irk_info *irk = &cp->irks[i];
5601 hci_add_irk(hdev, &irk->addr.bdaddr,
5602 le_addr_type(irk->addr.type), irk->val,
5606 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5608 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5610 hci_dev_unlock(hdev);
/* Validate an LTK entry: master flag must be 0/1, address type LE public
 * or LE random static (top two bits set).  NOTE(review): listing elides
 * the return statements between the checks.
 */
5615 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5617 if (key->master != 0x00 && key->master != 0x01)
5620 switch (key->addr.type) {
5621 case BDADDR_LE_PUBLIC:
5624 case BDADDR_LE_RANDOM:
5625 /* Two most significant bits shall be set */
5626 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.  Validates count/length and each
 * key, then replaces the SMP LTK store, mapping mgmt key types to SMP
 * LTK types.  NOTE(review): listing elides some lines (locking, breaks,
 * braces, default case body).
 */
5634 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5635 void *cp_data, u16 len)
5637 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound so expected_len below cannot overflow u16. */
5638 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5639 sizeof(struct mgmt_ltk_info));
5640 u16 key_count, expected_len;
5643 BT_DBG("request for %s", hdev->name);
5645 if (!lmp_le_capable(hdev))
5646 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5647 MGMT_STATUS_NOT_SUPPORTED);
5649 key_count = __le16_to_cpu(cp->key_count);
5650 if (key_count > max_key_count) {
5651 BT_ERR("load_ltks: too big key_count value %u", key_count);
5652 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5653 MGMT_STATUS_INVALID_PARAMS);
5656 expected_len = sizeof(*cp) + key_count *
5657 sizeof(struct mgmt_ltk_info);
5658 if (expected_len != len) {
5659 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5661 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5662 MGMT_STATUS_INVALID_PARAMS);
5665 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Reject the whole load if any single key is invalid. */
5667 for (i = 0; i < key_count; i++) {
5668 struct mgmt_ltk_info *key = &cp->keys[i];
5670 if (!ltk_is_valid(key))
5671 return mgmt_cmd_status(sk, hdev->id,
5672 MGMT_OP_LOAD_LONG_TERM_KEYS,
5673 MGMT_STATUS_INVALID_PARAMS);
5678 hci_smp_ltks_clear(hdev);
5680 for (i = 0; i < key_count; i++) {
5681 struct mgmt_ltk_info *key = &cp->keys[i];
5682 u8 type, authenticated;
/* Map mgmt LTK type to SMP type + authenticated flag. */
5684 switch (key->type) {
5685 case MGMT_LTK_UNAUTHENTICATED:
5686 authenticated = 0x00;
5687 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5689 case MGMT_LTK_AUTHENTICATED:
5690 authenticated = 0x01;
5691 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5693 case MGMT_LTK_P256_UNAUTH:
5694 authenticated = 0x00;
5695 type = SMP_LTK_P256;
5697 case MGMT_LTK_P256_AUTH:
5698 authenticated = 0x01;
5699 type = SMP_LTK_P256;
5701 case MGMT_LTK_P256_DEBUG:
5702 authenticated = 0x00;
5703 type = SMP_LTK_P256_DEBUG;
5708 hci_add_ltk(hdev, &key->addr.bdaddr,
5709 le_addr_type(key->addr.type), type, authenticated,
5710 key->val, key->enc_size, key->ediv, key->rand);
5713 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5716 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Conn Info.  On success fills the reply
 * from the connection's cached RSSI/TX power; otherwise reports invalid
 * sentinel values.  Drops the connection reference taken by the command.
 * NOTE(review): listing elides some lines (braces).
 */
5721 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5723 struct hci_conn *conn = cmd->user_data;
5724 struct mgmt_rp_get_conn_info rp;
5727 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5729 if (status == MGMT_STATUS_SUCCESS) {
5730 rp.rssi = conn->rssi;
5731 rp.tx_power = conn->tx_power;
5732 rp.max_tx_power = conn->max_tx_power;
5734 rp.rssi = HCI_RSSI_INVALID;
5735 rp.tx_power = HCI_TX_POWER_INVALID;
5736 rp.max_tx_power = HCI_TX_POWER_INVALID;
5739 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5740 status, &rp, sizeof(rp));
/* Release the hci_conn reference held for this pending command. */
5742 hci_conn_drop(conn);
/* HCI request completion callback for the Read RSSI / Read TX Power
 * request issued by get_conn_info().  Recovers the connection handle
 * from the last sent command and completes the matching pending command.
 * NOTE(review): listing elides some lines (locking, gotos, braces).
 */
5748 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5751 struct hci_cp_read_rssi *cp;
5752 struct mgmt_pending_cmd *cmd;
5753 struct hci_conn *conn;
5757 BT_DBG("status 0x%02x", hci_status);
5761 /* Commands sent in request are either Read RSSI or Read Transmit Power
5762 * Level so we check which one was last sent to retrieve connection
5763 * handle. Both commands have handle as first parameter so it's safe to
5764 * cast data on the same command struct.
5766 * First command sent is always Read RSSI and we fail only if it fails.
5767 * In other case we simply override error to indicate success as we
5768 * already remembered if TX power value is actually valid.
5770 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5772 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5773 status = MGMT_STATUS_SUCCESS;
5775 status = mgmt_status(hci_status);
5779 BT_ERR("invalid sent_cmd in conn_info response");
5783 handle = __le16_to_cpu(cp->handle);
5784 conn = hci_conn_hash_lookup_handle(hdev, handle);
5786 BT_ERR("unknown handle (%d) in conn_info response", handle);
5790 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5794 cmd->cmd_complete(cmd, status);
5795 mgmt_pending_remove(cmd);
5798 hci_dev_unlock(hdev);
/*
 * get_conn_info() - MGMT_OP_GET_CONN_INFO handler: return RSSI/TX-power for a
 * connected peer, either from the cached values in hci_conn or by issuing a
 * fresh Read RSSI / Read TX Power HCI request when the cache has aged out.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (closing braces, early "goto unlock" paths, error checks).
 */
5801 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5804 struct mgmt_cp_get_conn_info *cp = data;
5805 struct mgmt_rp_get_conn_info rp;
5806 struct hci_conn *conn;
5807 unsigned long conn_info_age;
5810 BT_DBG("%s", hdev->name);
/* reply always echoes the requested address, even on error */
5812 memset(&rp, 0, sizeof(rp));
5813 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5814 rp.addr.type = cp->addr.type;
5816 if (!bdaddr_type_is_valid(cp->addr.type))
5817 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5818 MGMT_STATUS_INVALID_PARAMS,
5823 if (!hdev_is_powered(hdev)) {
5824 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5825 MGMT_STATUS_NOT_POWERED, &rp,
/* BR/EDR addresses look up an ACL link; LE types fall through to LE_LINK */
5830 if (cp->addr.type == BDADDR_BREDR)
5831 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5834 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5836 if (!conn || conn->state != BT_CONNECTED) {
5837 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5838 MGMT_STATUS_NOT_CONNECTED, &rp,
/* only one Get Conn Info may be in flight per connection */
5843 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5844 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5845 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5849 /* To avoid client trying to guess when to poll again for information we
5850 * calculate conn info age as random value between min/max set in hdev.
5852 conn_info_age = hdev->conn_info_min_age +
5853 prandom_u32_max(hdev->conn_info_max_age -
5854 hdev->conn_info_min_age);
5856 /* Query controller to refresh cached values if they are too old or were
5859 if (time_after(jiffies, conn->conn_info_timestamp +
5860 msecs_to_jiffies(conn_info_age)) ||
5861 !conn->conn_info_timestamp) {
5862 struct hci_request req;
5863 struct hci_cp_read_tx_power req_txp_cp;
5864 struct hci_cp_read_rssi req_rssi_cp;
5865 struct mgmt_pending_cmd *cmd;
5867 hci_req_init(&req, hdev);
/* Read RSSI is always the first command in the request (see the
 * completion handler's ordering assumption).
 */
5868 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5869 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5872 /* For LE links TX power does not change thus we don't need to
5873 * query for it once value is known.
5875 if (!bdaddr_type_is_le(cp->addr.type) ||
5876 conn->tx_power == HCI_TX_POWER_INVALID) {
5877 req_txp_cp.handle = cpu_to_le16(conn->handle);
5878 req_txp_cp.type = 0x00;
5879 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5880 sizeof(req_txp_cp), &req_txp_cp);
5883 /* Max TX power needs to be read only once per connection */
5884 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5885 req_txp_cp.handle = cpu_to_le16(conn->handle);
5886 req_txp_cp.type = 0x01;
5887 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5888 sizeof(req_txp_cp), &req_txp_cp);
5891 err = hci_req_run(&req, conn_info_refresh_complete);
5895 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* hold + get: keep the connection alive until the completion runs */
5902 hci_conn_hold(conn);
5903 cmd->user_data = hci_conn_get(conn);
5904 cmd->cmd_complete = conn_info_cmd_complete;
5906 conn->conn_info_timestamp = jiffies;
5908 /* Cache is valid, just reply with values cached in hci_conn */
5909 rp.rssi = conn->rssi;
5910 rp.tx_power = conn->tx_power;
5911 rp.max_tx_power = conn->max_tx_power;
5913 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5914 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5918 hci_dev_unlock(hdev);
/*
 * clock_info_cmd_complete() - build and send the Get Clock Info mgmt reply,
 * filling in local clock and (if a connection was involved) piconet clock and
 * accuracy, then drop the connection reference.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (status checks, NULL guards, hci_dev_put()).
 */
5922 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5924 struct hci_conn *conn = cmd->user_data;
5925 struct mgmt_rp_get_clock_info rp;
5926 struct hci_dev *hdev;
5929 memset(&rp, 0, sizeof(rp));
5930 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
/* take a temporary hdev ref to read its cached local clock value */
5935 hdev = hci_dev_get(cmd->index);
5937 rp.local_clock = cpu_to_le32(hdev->clock);
/* per-connection values — only meaningful when conn is set */
5942 rp.piconet_clock = cpu_to_le32(conn->clock);
5943 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5947 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5951 hci_conn_drop(conn);
/*
 * get_clock_info_complete() - HCI request callback for Read Clock; resolves
 * the connection (if the piconet clock was requested) and completes the
 * matching pending Get Clock Info command.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines.
 */
5958 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5960 struct hci_cp_read_clock *hci_cp;
5961 struct mgmt_pending_cmd *cmd;
5962 struct hci_conn *conn;
5964 BT_DBG("%s status %u", hdev->name, status);
5968 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* "which" non-zero means piconet clock was read for a specific handle */
5972 if (hci_cp->which) {
5973 u16 handle = __le16_to_cpu(hci_cp->handle);
5974 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* find the pending command keyed by conn (NULL for local-clock-only) */
5979 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5983 cmd->cmd_complete(cmd, mgmt_status(status));
5984 mgmt_pending_remove(cmd);
5987 hci_dev_unlock(hdev);
/*
 * get_clock_info() - MGMT_OP_GET_CLOCK_INFO handler: queue HCI Read Clock
 * commands for the local clock and, if a BR/EDR peer address was given, for
 * that connection's piconet clock.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (error-path gotos, NULL checks, closing braces).
 */
5990 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5993 struct mgmt_cp_get_clock_info *cp = data;
5994 struct mgmt_rp_get_clock_info rp;
5995 struct hci_cp_read_clock hci_cp;
5996 struct mgmt_pending_cmd *cmd;
5997 struct hci_request req;
5998 struct hci_conn *conn;
6001 BT_DBG("%s", hdev->name);
/* reply always echoes the requested address */
6003 memset(&rp, 0, sizeof(rp));
6004 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6005 rp.addr.type = cp->addr.type;
/* clock info is a BR/EDR-only concept */
6007 if (cp->addr.type != BDADDR_BREDR)
6008 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6009 MGMT_STATUS_INVALID_PARAMS,
6014 if (!hdev_is_powered(hdev)) {
6015 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6016 MGMT_STATUS_NOT_POWERED, &rp,
/* non-ANY address: the peer must currently be connected */
6021 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6022 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6024 if (!conn || conn->state != BT_CONNECTED) {
6025 err = mgmt_cmd_complete(sk, hdev->id,
6026 MGMT_OP_GET_CLOCK_INFO,
6027 MGMT_STATUS_NOT_CONNECTED,
6035 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6041 cmd->cmd_complete = clock_info_cmd_complete;
6043 hci_req_init(&req, hdev);
/* first Read Clock: which=0 reads the local clock */
6045 memset(&hci_cp, 0, sizeof(hci_cp));
6046 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* keep the connection alive while the request is in flight */
6049 hci_conn_hold(conn);
6050 cmd->user_data = hci_conn_get(conn);
6052 hci_cp.handle = cpu_to_le16(conn->handle);
6053 hci_cp.which = 0x01; /* Piconet clock */
6054 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6057 err = hci_req_run(&req, get_clock_info_complete);
6059 mgmt_pending_remove(cmd);
6062 hci_dev_unlock(hdev);
/*
 * is_connected() - test whether an LE connection to @addr of @type exists and
 * is in BT_CONNECTED state.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (the NULL check on conn and the return statements).
 */
6066 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6068 struct hci_conn *conn;
6070 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
/* the destination address type must match as well as the address */
6074 if (conn->dst_type != type)
6077 if (conn->state != BT_CONNECTED)
6083 /* This function requires the caller holds hdev->lock */
/*
 * hci_conn_params_set() - create (or reuse) connection parameters for an LE
 * device and move it onto the action list matching the requested auto-connect
 * policy, updating background scanning as needed.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (break statements, closing braces). "¶ms" in several lines below is
 * mojibake for "&params" (HTML-entity decode of this copy); do not take it
 * as literal source.
 */
6084 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
6085 u8 addr_type, u8 auto_connect)
6087 struct hci_dev *hdev = req->hdev;
6088 struct hci_conn_params *params;
6090 params = hci_conn_params_add(hdev, addr, addr_type);
/* nothing to do if the policy is unchanged */
6094 if (params->auto_connect == auto_connect)
/* detach from whichever pend_le_* list the entry currently sits on */
6097 list_del_init(¶ms->action);
6099 switch (auto_connect) {
6100 case HCI_AUTO_CONN_DISABLED:
6101 case HCI_AUTO_CONN_LINK_LOSS:
6102 /* If auto connect is being disabled when we're trying to
6103 * connect to device, keep connecting.
6105 if (params->explicit_connect)
6106 list_add(¶ms->action, &hdev->pend_le_conns);
6108 __hci_update_background_scan(req);
6110 case HCI_AUTO_CONN_REPORT:
6111 if (params->explicit_connect)
6112 list_add(¶ms->action, &hdev->pend_le_conns);
6114 list_add(¶ms->action, &hdev->pend_le_reports);
6115 __hci_update_background_scan(req);
6117 case HCI_AUTO_CONN_DIRECT:
6118 case HCI_AUTO_CONN_ALWAYS:
6119 if (!is_connected(hdev, addr, addr_type)) {
6120 list_add(¶ms->action, &hdev->pend_le_conns);
6121 /* If we are in scan phase of connecting, we were
6122 * already added to pend_le_conns and scanning.
6124 if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
6125 __hci_update_background_scan(req);
6130 params->auto_connect = auto_connect;
6132 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/*
 * device_added() - emit the MGMT_EV_DEVICE_ADDED event to all mgmt sockets
 * except @sk (the originator).
 * NOTE(review): the ev.action assignment line appears elided in this copy.
 */
6138 static void device_added(struct sock *sk, struct hci_dev *hdev,
6139 bdaddr_t *bdaddr, u8 type, u8 action)
6141 struct mgmt_ev_device_added ev;
6143 bacpy(&ev.addr.bdaddr, bdaddr);
6144 ev.addr.type = type;
6147 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/*
 * add_device_complete() - HCI request callback for Add Device; completes and
 * removes the pending mgmt command with the translated status.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (hci_dev_lock and the NULL-cmd bail-out).
 */
6150 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6152 struct mgmt_pending_cmd *cmd;
6154 BT_DBG("status 0x%02x", status);
6158 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
6162 cmd->cmd_complete(cmd, mgmt_status(status));
6163 mgmt_pending_remove(cmd);
6166 hci_dev_unlock(hdev);
/*
 * add_device() - MGMT_OP_ADD_DEVICE handler: whitelist a BR/EDR peer or set
 * an LE auto-connect policy for an identity address, then run the resulting
 * HCI request (page-scan / background-scan updates).
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (gotos to the unlock label, closing braces, the BR/EDR "added" path).
 */
6169 static int add_device(struct sock *sk, struct hci_dev *hdev,
6170 void *data, u16 len)
6172 struct mgmt_cp_add_device *cp = data;
6173 struct mgmt_pending_cmd *cmd;
6174 struct hci_request req;
6175 u8 auto_conn, addr_type;
6178 BT_DBG("%s", hdev->name);
/* address must be a valid type and not BDADDR_ANY */
6180 if (!bdaddr_type_is_valid(cp->addr.type) ||
6181 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6182 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6183 MGMT_STATUS_INVALID_PARAMS,
6184 &cp->addr, sizeof(cp->addr));
/* only actions 0x00 (background), 0x01 (allow incoming / direct) and
 * 0x02 (auto-connect) are defined
 */
6186 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6187 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6188 MGMT_STATUS_INVALID_PARAMS,
6189 &cp->addr, sizeof(cp->addr));
6191 hci_req_init(&req, hdev);
6195 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
6201 cmd->cmd_complete = addr_cmd_complete;
6203 if (cp->addr.type == BDADDR_BREDR) {
6204 /* Only incoming connections action is supported for now */
6205 if (cp->action != 0x01) {
6206 err = cmd->cmd_complete(cmd,
6207 MGMT_STATUS_INVALID_PARAMS);
6208 mgmt_pending_remove(cmd);
6212 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
6217 __hci_update_page_scan(&req);
/* LE path: map mgmt address type to HCI LE address type */
6222 addr_type = le_addr_type(cp->addr.type);
6224 if (cp->action == 0x02)
6225 auto_conn = HCI_AUTO_CONN_ALWAYS;
6226 else if (cp->action == 0x01)
6227 auto_conn = HCI_AUTO_CONN_DIRECT;
6229 auto_conn = HCI_AUTO_CONN_REPORT;
6231 /* Kernel internally uses conn_params with resolvable private
6232 * address, but Add Device allows only identity addresses.
6233 * Make sure it is enforced before calling
6234 * hci_conn_params_lookup.
6236 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6237 err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
6238 mgmt_pending_remove(cmd);
6242 /* If the connection parameters don't exist for this device,
6243 * they will be created and configured with defaults.
6245 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
6247 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
6248 mgmt_pending_remove(cmd);
/* notify other mgmt sockets about the new device entry */
6253 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6255 err = hci_req_run(&req, add_device_complete);
6257 /* ENODATA means no HCI commands were needed (e.g. if
6258 * the adapter is powered off).
6260 if (err == -ENODATA)
6261 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6262 mgmt_pending_remove(cmd);
6266 hci_dev_unlock(hdev);
/*
 * device_removed() - emit the MGMT_EV_DEVICE_REMOVED event to all mgmt
 * sockets except @sk (the originator).
 */
6270 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6271 bdaddr_t *bdaddr, u8 type)
6273 struct mgmt_ev_device_removed ev;
6275 bacpy(&ev.addr.bdaddr, bdaddr);
6276 ev.addr.type = type;
6278 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/*
 * remove_device_complete() - HCI request callback for Remove Device;
 * completes and removes the pending mgmt command with the translated status.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (hci_dev_lock and the NULL-cmd bail-out).
 */
6281 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6283 struct mgmt_pending_cmd *cmd;
6285 BT_DBG("status 0x%02x", status);
6289 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6293 cmd->cmd_complete(cmd, mgmt_status(status));
6294 mgmt_pending_remove(cmd);
6297 hci_dev_unlock(hdev);
/*
 * remove_device() - MGMT_OP_REMOVE_DEVICE handler: remove one device (BR/EDR
 * whitelist entry or LE conn params) or, for BDADDR_ANY, wipe all whitelist
 * entries and all non-disabled LE connection parameters.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (gotos, closing braces, list_del calls). "¶ms" below is mojibake for
 * "&params" in this copy.
 */
6300 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6301 void *data, u16 len)
6303 struct mgmt_cp_remove_device *cp = data;
6304 struct mgmt_pending_cmd *cmd;
6305 struct hci_request req;
6308 BT_DBG("%s", hdev->name);
6310 hci_req_init(&req, hdev);
6314 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6320 cmd->cmd_complete = addr_cmd_complete;
/* non-ANY address: remove a single device entry */
6322 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6323 struct hci_conn_params *params;
6326 if (!bdaddr_type_is_valid(cp->addr.type)) {
6327 err = cmd->cmd_complete(cmd,
6328 MGMT_STATUS_INVALID_PARAMS);
6329 mgmt_pending_remove(cmd);
6333 if (cp->addr.type == BDADDR_BREDR) {
6334 err = hci_bdaddr_list_del(&hdev->whitelist,
6338 err = cmd->cmd_complete(cmd,
6339 MGMT_STATUS_INVALID_PARAMS);
6340 mgmt_pending_remove(cmd);
6344 __hci_update_page_scan(&req);
6346 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path */
6351 addr_type = le_addr_type(cp->addr.type);
6353 /* Kernel internally uses conn_params with resolvable private
6354 * address, but Remove Device allows only identity addresses.
6355 * Make sure it is enforced before calling
6356 * hci_conn_params_lookup.
6358 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6359 err = cmd->cmd_complete(cmd,
6360 MGMT_STATUS_INVALID_PARAMS);
6361 mgmt_pending_remove(cmd);
6365 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6368 err = cmd->cmd_complete(cmd,
6369 MGMT_STATUS_INVALID_PARAMS);
6370 mgmt_pending_remove(cmd);
/* entries the user never added (disabled/explicit) can't be removed */
6374 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
6375 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6376 err = cmd->cmd_complete(cmd,
6377 MGMT_STATUS_INVALID_PARAMS);
6378 mgmt_pending_remove(cmd);
6382 list_del(¶ms->action);
6383 list_del(¶ms->list);
6385 __hci_update_background_scan(&req);
6387 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: remove everything; addr.type must be 0 here */
6389 struct hci_conn_params *p, *tmp;
6390 struct bdaddr_list *b, *btmp;
6392 if (cp->addr.type) {
6393 err = cmd->cmd_complete(cmd,
6394 MGMT_STATUS_INVALID_PARAMS);
6395 mgmt_pending_remove(cmd);
6399 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6400 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6405 __hci_update_page_scan(&req);
6407 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
/* disabled entries were never user-visible — keep them */
6408 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6410 device_removed(sk, hdev, &p->addr, p->addr_type);
6411 if (p->explicit_connect) {
/* an in-progress explicit connect survives as EXPLICIT */
6412 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
6415 list_del(&p->action);
6420 BT_DBG("All LE connection parameters were removed");
6422 __hci_update_background_scan(&req);
6426 err = hci_req_run(&req, remove_device_complete);
6428 /* ENODATA means no HCI commands were needed (e.g. if
6429 * the adapter is powered off).
6431 if (err == -ENODATA)
6432 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6433 mgmt_pending_remove(cmd);
6437 hci_dev_unlock(hdev);
/*
 * load_conn_param() - MGMT_OP_LOAD_CONN_PARAM handler: replace the stored LE
 * connection parameters with the list supplied by userspace, validating
 * lengths, address types and parameter ranges per entry.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (continue statements, closing braces, hci_dev_lock). "¶m" below is
 * mojibake for "&param" in this copy.
 */
6441 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6444 struct mgmt_cp_load_conn_param *cp = data;
/* cap so expected_len below cannot overflow u16 */
6445 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6446 sizeof(struct mgmt_conn_param));
6447 u16 param_count, expected_len;
6450 if (!lmp_le_capable(hdev))
6451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6452 MGMT_STATUS_NOT_SUPPORTED);
6454 param_count = __le16_to_cpu(cp->param_count);
6455 if (param_count > max_param_count) {
6456 BT_ERR("load_conn_param: too big param_count value %u",
6458 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6459 MGMT_STATUS_INVALID_PARAMS);
/* the declared count must exactly match the payload length */
6462 expected_len = sizeof(*cp) + param_count *
6463 sizeof(struct mgmt_conn_param);
6464 if (expected_len != len) {
6465 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6467 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6468 MGMT_STATUS_INVALID_PARAMS);
6471 BT_DBG("%s param_count %u", hdev->name, param_count);
/* start from a clean slate: drop previously-disabled entries */
6475 hci_conn_params_clear_disabled(hdev);
6477 for (i = 0; i < param_count; i++) {
6478 struct mgmt_conn_param *param = &cp->params[i];
6479 struct hci_conn_params *hci_param;
6480 u16 min, max, latency, timeout;
6483 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
6486 if (param->addr.type == BDADDR_LE_PUBLIC) {
6487 addr_type = ADDR_LE_DEV_PUBLIC;
6488 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6489 addr_type = ADDR_LE_DEV_RANDOM;
/* invalid entries are skipped, not fatal for the whole load */
6491 BT_ERR("Ignoring invalid connection parameters");
6495 min = le16_to_cpu(param->min_interval);
6496 max = le16_to_cpu(param->max_interval);
6497 latency = le16_to_cpu(param->latency);
6498 timeout = le16_to_cpu(param->timeout);
6500 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6501 min, max, latency, timeout);
6503 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6504 BT_ERR("Ignoring invalid connection parameters");
6508 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
6511 BT_ERR("Failed to add connection parameters");
6515 hci_param->conn_min_interval = min;
6516 hci_param->conn_max_interval = max;
6517 hci_param->conn_latency = latency;
6518 hci_param->supervision_timeout = timeout;
6521 hci_dev_unlock(hdev);
6523 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/*
 * set_external_config() - MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle the
 * HCI_EXT_CONFIGURED flag on an unpowered controller with the
 * EXTERNAL_CONFIG quirk, and transition the index between configured and
 * unconfigured state when that changes the overall configuration status.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (the cp->config test, "changed" guards, closing braces).
 */
6527 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6528 void *data, u16 len)
6530 struct mgmt_cp_set_external_config *cp = data;
6534 BT_DBG("%s", hdev->name);
/* configuration source can only change while powered off */
6536 if (hdev_is_powered(hdev))
6537 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6538 MGMT_STATUS_REJECTED);
6540 if (cp->config != 0x00 && cp->config != 0x01)
6541 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6542 MGMT_STATUS_INVALID_PARAMS);
6544 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6545 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6546 MGMT_STATUS_NOT_SUPPORTED);
6551 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6553 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6555 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6562 err = new_options(hdev, sk);
/* configuration status changed — re-announce the index on the other
 * (configured vs unconfigured) index list
 */
6564 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6565 mgmt_index_removed(hdev);
6567 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6568 hci_dev_set_flag(hdev, HCI_CONFIG);
6569 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6571 queue_work(hdev->req_workqueue, &hdev->power_on);
6573 set_bit(HCI_RAW, &hdev->flags);
6574 mgmt_index_added(hdev);
6579 hci_dev_unlock(hdev);
/*
 * set_public_address() - MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public
 * address for controllers that need one programmed via hdev->set_bdaddr, and
 * finish configuration if that was the last missing piece.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * ("changed" guards, closing braces).
 */
6583 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6584 void *data, u16 len)
6586 struct mgmt_cp_set_public_address *cp = data;
6590 BT_DBG("%s", hdev->name);
/* address can only change while powered off */
6592 if (hdev_is_powered(hdev))
6593 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6594 MGMT_STATUS_REJECTED);
6596 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6597 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6598 MGMT_STATUS_INVALID_PARAMS);
/* only meaningful if the driver can actually program the address */
6600 if (!hdev->set_bdaddr)
6601 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6602 MGMT_STATUS_NOT_SUPPORTED);
6606 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6607 bacpy(&hdev->public_addr, &cp->bdaddr);
6609 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6616 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6617 err = new_options(hdev, sk);
/* if now fully configured, move the index to the configured list and
 * power it up for the initial setup sequence
 */
6619 if (is_configured(hdev)) {
6620 mgmt_index_removed(hdev);
6622 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6624 hci_dev_set_flag(hdev, HCI_CONFIG);
6625 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6627 queue_work(hdev->req_workqueue, &hdev->power_on);
6631 hci_dev_unlock(hdev);
/*
 * eir_append_data() - append one EIR/AD field (length, type, payload) to the
 * buffer at offset @eir_len and return the new offset.
 * NOTE(review): the "return eir_len;" line appears elided in this copy. The
 * caller is responsible for ensuring @eir has room for data_len + 2 bytes.
 */
6635 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
/* field length byte counts the type octet plus the payload */
6638 eir[eir_len++] = sizeof(type) + data_len;
6639 eir[eir_len++] = type;
6640 memcpy(&eir[eir_len], data, data_len);
6641 eir_len += data_len;
/*
 * read_local_oob_ext_data_complete() - skb-based HCI request callback for
 * Read Local OOB (Extended) Data; packages class-of-device plus the P-192
 * and/or P-256 hash/randomizer values into the mgmt reply and broadcasts the
 * OOB-data-updated event.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (the h192/r192/h256/r256 pointer assignments, goto labels, kfree).
 */
6646 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6647 u16 opcode, struct sk_buff *skb)
6649 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6650 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6651 u8 *h192, *r192, *h256, *r256;
6652 struct mgmt_pending_cmd *cmd;
6656 BT_DBG("%s status %u", hdev->name, status);
6658 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6662 mgmt_cp = cmd->param;
6665 status = mgmt_status(status);
/* legacy (P-192 only) controller response */
6672 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6673 struct hci_rp_read_local_oob_data *rp;
6675 if (skb->len != sizeof(*rp)) {
6676 status = MGMT_STATUS_FAILED;
6679 status = MGMT_STATUS_SUCCESS;
6680 rp = (void *)skb->data;
/* 5 = class-of-dev field, 18 = one 16-byte EIR field + header */
6682 eir_len = 5 + 18 + 18;
/* extended (secure connections capable) controller response */
6689 struct hci_rp_read_local_oob_ext_data *rp;
6691 if (skb->len != sizeof(*rp)) {
6692 status = MGMT_STATUS_FAILED;
6695 status = MGMT_STATUS_SUCCESS;
6696 rp = (void *)skb->data;
/* SC-only mode advertises only the P-256 values */
6698 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6699 eir_len = 5 + 18 + 18;
6703 eir_len = 5 + 18 + 18 + 18 + 18;
6713 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6720 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6721 hdev->dev_class, 3);
6724 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6725 EIR_SSP_HASH_C192, h192, 16);
6726 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6727 EIR_SSP_RAND_R192, r192, 16);
6731 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6732 EIR_SSP_HASH_C256, h256, 16);
6733 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6734 EIR_SSP_RAND_R256, r256, 16);
6738 mgmt_rp->type = mgmt_cp->type;
6739 mgmt_rp->eir_len = cpu_to_le16(eir_len);
6741 err = mgmt_cmd_complete(cmd->sk, hdev->id,
6742 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
6743 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
6744 if (err < 0 || status)
/* suppress the broadcast echo to the requesting socket itself */
6747 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
6749 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6750 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
6751 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
6754 mgmt_pending_remove(cmd);
/*
 * read_local_ssp_oob_req() - queue a Read Local OOB (Extended) Data HCI
 * request on behalf of a pending Read Local OOB Ext Data mgmt command.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (the NULL-cmd check and return statements).
 */
6757 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6758 struct mgmt_cp_read_local_oob_ext_data *cp)
6760 struct mgmt_pending_cmd *cmd;
6761 struct hci_request req;
6764 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6769 hci_req_init(&req, hdev);
/* pick the extended variant only when BR/EDR SC is usable */
6771 if (bredr_sc_enabled(hdev))
6772 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6774 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6776 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6778 mgmt_pending_remove(cmd);
/*
 * read_local_oob_ext_data() - MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler: return
 * EIR-formatted OOB data for either BR/EDR (possibly via an async HCI round
 * trip) or LE (address, role, SC confirm/random, flags).
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (switch statements, goto labels such as "complete"/"done", breaks, kfree).
 */
6785 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6786 void *data, u16 data_len)
6788 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6789 struct mgmt_rp_read_local_oob_ext_data *rp;
6792 u8 status, flags, role, addr[7], hash[16], rand[16];
6795 BT_DBG("%s", hdev->name);
/* pre-validate the requested transport(s) against hdev support; an
 * elided "switch (cp->type)" appears to drive these case labels.
 */
6797 if (hdev_is_powered(hdev)) {
6799 case BIT(BDADDR_BREDR):
6800 status = mgmt_bredr_support(hdev);
6806 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6807 status = mgmt_le_support(hdev);
/* LE reply size: addr(9) + role(3) + confirm(18) + rand(18) + flags(3) */
6811 eir_len = 9 + 3 + 18 + 18 + 3;
6814 status = MGMT_STATUS_INVALID_PARAMS;
6819 status = MGMT_STATUS_NOT_POWERED;
6823 rp_len = sizeof(*rp) + eir_len;
6824 rp = kmalloc(rp_len, GFP_ATOMIC);
6835 case BIT(BDADDR_BREDR):
/* with SSP the data must come from the controller asynchronously */
6836 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6837 err = read_local_ssp_oob_req(hdev, sk, cp);
6838 hci_dev_unlock(hdev);
6842 status = MGMT_STATUS_FAILED;
/* non-SSP fallback: class of device only */
6845 eir_len = eir_append_data(rp->eir, eir_len,
6847 hdev->dev_class, 3);
6850 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6851 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6852 smp_generate_oob(hdev, hash, rand) < 0) {
6853 hci_dev_unlock(hdev);
6854 status = MGMT_STATUS_FAILED;
6858 /* This should return the active RPA, but since the RPA
6859 * is only programmed on demand, it is really hard to fill
6860 * this in at the moment. For now disallow retrieving
6861 * local out-of-band data when privacy is in use.
6863 * Returning the identity address will not help here since
6864 * pairing happens before the identity resolving key is
6865 * known and thus the connection establishment happens
6866 * based on the RPA and not the identity address.
6868 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6869 hci_dev_unlock(hdev);
6870 status = MGMT_STATUS_REJECTED;
/* choose static vs public address the same way advertising does */
6874 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6875 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6876 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6877 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6878 memcpy(addr, &hdev->static_addr, 6);
6881 memcpy(addr, &hdev->bdaddr, 6);
/* addr[6] (the type octet) assignment appears elided in this copy */
6885 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6886 addr, sizeof(addr));
6888 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6893 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6894 &role, sizeof(role));
6896 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6897 eir_len = eir_append_data(rp->eir, eir_len,
6899 hash, sizeof(hash));
6901 eir_len = eir_append_data(rp->eir, eir_len,
6903 rand, sizeof(rand));
6906 flags = get_adv_discov_flags(hdev);
6908 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6909 flags |= LE_AD_NO_BREDR;
6911 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6912 &flags, sizeof(flags));
6916 hci_dev_unlock(hdev);
/* from now on this socket wants OOB-data-updated events */
6918 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6920 status = MGMT_STATUS_SUCCESS;
6923 rp->type = cp->type;
6924 rp->eir_len = cpu_to_le16(eir_len);
6926 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6927 status, rp, sizeof(*rp) + eir_len);
6928 if (err < 0 || status)
6931 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6932 rp, sizeof(*rp) + eir_len,
6933 HCI_MGMT_OOB_DATA_EVENTS, sk);
/*
 * get_supported_adv_flags() - build the bitmask of MGMT_ADV_FLAG_* values
 * this controller supports; TX power is only offered when the controller
 * reported a valid advertising TX power.
 * NOTE(review): the flags declaration and return line appear elided in this
 * copy.
 */
6941 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6945 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6946 flags |= MGMT_ADV_FLAG_DISCOV;
6947 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6948 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6950 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6951 flags |= MGMT_ADV_FLAG_TX_POWER;
/*
 * read_adv_features() - MGMT_OP_READ_ADV_FEATURES handler: report supported
 * advertising flags, data-size limits, max instance count and the list of
 * currently-configured advertising instances.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (the "if (instance)" guard around the list walk, kfree, return).
 */
6956 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6957 void *data, u16 data_len)
6959 struct mgmt_rp_read_adv_features *rp;
6963 struct adv_info *adv_instance;
6964 u32 supported_flags;
6966 BT_DBG("%s", hdev->name);
6968 if (!lmp_le_capable(hdev))
6969 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6970 MGMT_STATUS_REJECTED);
6974 rp_len = sizeof(*rp);
/* one trailing byte per configured instance when instances are in use */
6976 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6978 rp_len += hdev->adv_instance_cnt;
6980 rp = kmalloc(rp_len, GFP_ATOMIC);
6982 hci_dev_unlock(hdev);
6986 supported_flags = get_supported_adv_flags(hdev);
6988 rp->supported_flags = cpu_to_le32(supported_flags);
6989 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6990 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6991 rp->max_instances = HCI_MAX_ADV_INSTANCES;
6995 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
/* never write past the adv_instance_cnt bytes we allocated */
6996 if (i >= hdev->adv_instance_cnt)
6999 rp->instance[i] = adv_instance->instance;
7002 rp->num_instances = hdev->adv_instance_cnt;
7004 rp->num_instances = 0;
7007 hci_dev_unlock(hdev);
7009 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7010 MGMT_STATUS_SUCCESS, rp, rp_len);
/*
 * tlv_data_is_valid() - validate user-supplied advertising / scan-response
 * TLV data: fields must fit the available length, and fields the kernel
 * manages itself (Flags, TX Power, when the corresponding adv_flags are set)
 * must not appear in user data.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (max_len reductions, cur_len read, the len>max_len check, returns).
 */
7017 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7018 u8 len, bool is_adv_data)
7020 u8 max_len = HCI_MAX_AD_LENGTH;
7022 bool flags_managed = false;
7023 bool tx_power_managed = false;
7024 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
7025 MGMT_ADV_FLAG_MANAGED_FLAGS;
/* kernel injects the Flags field itself when any of these are requested */
7027 if (is_adv_data && (adv_flags & flags_params)) {
7028 flags_managed = true;
7032 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
7033 tx_power_managed = true;
7040 /* Make sure that the data is correctly formatted. */
7041 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7044 if (flags_managed && data[i + 1] == EIR_FLAGS)
7047 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
7050 /* If the current field length would exceed the total data
7051 * length, then it's invalid.
7053 if (i + cur_len >= len)
/*
 * add_advertising_complete() - HCI request callback for Add Advertising; on
 * failure, removes all still-pending advertising instances (and clears the
 * INSTANCE flag), then completes the pending mgmt command.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (the status test guarding the cleanup, cp assignment, closing braces).
 */
7060 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
7063 struct mgmt_pending_cmd *cmd;
7064 struct mgmt_cp_add_advertising *cp;
7065 struct mgmt_rp_add_advertising rp;
7066 struct adv_info *adv_instance, *n;
7069 BT_DBG("status %d", status);
7073 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
7076 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
/* drop every instance that never made it past "pending" */
7078 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
7079 if (!adv_instance->pending)
7083 adv_instance->pending = false;
7087 instance = adv_instance->instance;
/* stop the rotation timer if the failing instance was current */
7089 if (hdev->cur_adv_instance == instance)
7090 cancel_adv_timeout(hdev);
7092 hci_remove_adv_instance(hdev, instance);
7093 advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7100 rp.instance = cp->instance;
7103 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7104 mgmt_status(status));
7106 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7107 mgmt_status(status), &rp, sizeof(rp));
7109 mgmt_pending_remove(cmd);
7112 hci_dev_unlock(hdev);
/*
 * mgmt_adv_timeout_expired() - called when an advertising instance's timeout
 * fires; removes the current instance and, if none remain, disables
 * advertising entirely.
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (hci_dev_lock, goto unlock on instance 0).
 */
7115 void mgmt_adv_timeout_expired(struct hci_dev *hdev)
7118 struct hci_request req;
7120 hdev->adv_instance_timeout = 0;
7122 instance = get_current_adv_instance(hdev);
7123 if (instance == 0x00)
7127 hci_req_init(&req, hdev);
7129 clear_adv_instance(hdev, &req, instance, false);
7131 if (list_empty(&hdev->adv_instances))
7132 disable_advertising(&req);
/* only run the request if clear/disable actually queued commands */
7134 if (!skb_queue_empty(&req.cmd_q))
7135 hci_req_run(&req, NULL);
7137 hci_dev_unlock(hdev);
/*
 * add_advertising() - MGMT_OP_ADD_ADVERTISING handler: validate and store an
 * advertising instance, then schedule it for advertising if appropriate
 * (immediately, or after the currently-advertised instance).
 * NOTE(review): gaps in the embedded line numbering mark elided source lines
 * (goto unlock paths, status test after mgmt_le_support, closing braces).
 */
7140 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7141 void *data, u16 data_len)
7143 struct mgmt_cp_add_advertising *cp = data;
7144 struct mgmt_rp_add_advertising rp;
7146 u32 supported_flags;
7148 u16 timeout, duration;
7149 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7150 u8 schedule_instance = 0;
7151 struct adv_info *next_instance;
7153 struct mgmt_pending_cmd *cmd;
7154 struct hci_request req;
7156 BT_DBG("%s", hdev->name);
7158 status = mgmt_le_support(hdev);
7160 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* total length must exactly cover adv data plus scan response data */
7163 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7164 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7165 MGMT_STATUS_INVALID_PARAMS);
7167 flags = __le32_to_cpu(cp->flags);
7168 timeout = __le16_to_cpu(cp->timeout);
7169 duration = __le16_to_cpu(cp->duration);
7171 /* The current implementation only supports a subset of the specified
7174 supported_flags = get_supported_adv_flags(hdev);
7175 if (flags & ~supported_flags)
7176 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7177 MGMT_STATUS_INVALID_PARAMS);
/* a timeout can only be tracked while the controller is powered */
7181 if (timeout && !hdev_is_powered(hdev)) {
7182 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7183 MGMT_STATUS_REJECTED);
/* serialize against other advertising/LE state changes */
7187 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7188 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7189 pending_find(MGMT_OP_SET_LE, hdev)) {
7190 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7195 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7196 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7197 cp->scan_rsp_len, false)) {
7198 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7199 MGMT_STATUS_INVALID_PARAMS);
7203 err = hci_add_adv_instance(hdev, cp->instance, flags,
7204 cp->adv_data_len, cp->data,
7206 cp->data + cp->adv_data_len,
7209 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7210 MGMT_STATUS_FAILED);
7214 /* Only trigger an advertising added event if a new instance was
7217 if (hdev->adv_instance_cnt > prev_instance_cnt)
7218 advertising_added(sk, hdev, cp->instance);
7220 hci_dev_set_flag(hdev, HCI_ADVERTISING_INSTANCE);
7222 if (hdev->cur_adv_instance == cp->instance) {
7223 /* If the currently advertised instance is being changed then
7224 * cancel the current advertising and schedule the next
7225 * instance. If there is only one instance then the overridden
7226 * advertising data will be visible right away.
7228 cancel_adv_timeout(hdev);
7230 next_instance = hci_get_next_instance(hdev, cp->instance);
7232 schedule_instance = next_instance->instance;
7233 } else if (!hdev->adv_instance_timeout) {
7234 /* Immediately advertise the new instance if no other
7235 * instance is currently being advertised.
7237 schedule_instance = cp->instance;
7240 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7241 * there is no instance to be advertised then we have no HCI
7242 * communication to make. Simply return.
7244 if (!hdev_is_powered(hdev) ||
7245 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7246 !schedule_instance) {
7247 rp.instance = cp->instance;
7248 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7249 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7253 /* We're good to go, update advertising data, parameters, and start
7256 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7263 hci_req_init(&req, hdev);
7265 err = schedule_adv_instance(&req, schedule_instance, true);
7268 err = hci_req_run(&req, add_advertising_complete);
7271 mgmt_pending_remove(cmd);
7274 hci_dev_unlock(hdev);
/* Completion callback for the Remove Advertising HCI request.
 * NOTE(review): this chunk is a mangled extraction — original line numbers
 * are embedded in each line and several lines (braces, hci_dev_lock, the
 * !cmd bail-out, cp = cmd->param) are elided. Comments only; code left
 * byte-identical.
 */
7279 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7282 struct mgmt_pending_cmd *cmd;
7283 struct mgmt_cp_remove_advertising *cp;
7284 struct mgmt_rp_remove_advertising rp;
7286 BT_DBG("status %d", status);
7290 /* A failure status here only means that we failed to disable
7291 * advertising. Otherwise, the advertising instance has been removed,
7292 * so report success.
7294 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
/* presumably cp = cmd->param here (elided) — rp echoes the instance back */
7299 rp.instance = cp->instance;
7301 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7303 mgmt_pending_remove(cmd);
7306 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING: validate the instance, clear it
 * (instance 0 means "all"), and run the resulting HCI request.
 * NOTE(review): mangled extraction — braces, hci_dev_lock, several error
 * paths and the "goto unlock"s are elided. Comments only.
 */
7309 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7310 void *data, u16 data_len)
7312 struct mgmt_cp_remove_advertising *cp = data;
7313 struct mgmt_rp_remove_advertising rp;
7314 struct mgmt_pending_cmd *cmd;
7315 struct hci_request req;
7318 BT_DBG("%s", hdev->name);
/* A non-zero instance must exist; instance 0 removes all instances. */
7322 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
7323 err = mgmt_cmd_status(sk, hdev->id,
7324 MGMT_OP_REMOVE_ADVERTISING,
7325 MGMT_STATUS_INVALID_PARAMS);
/* Only one advertising-related operation may be in flight at a time. */
7329 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7330 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7331 pending_find(MGMT_OP_SET_LE, hdev)) {
7332 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7337 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
7338 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7339 MGMT_STATUS_INVALID_PARAMS);
7343 hci_req_init(&req, hdev);
7345 clear_adv_instance(hdev, &req, cp->instance, true);
/* If the last instance went away, stop advertising altogether. */
7347 if (list_empty(&hdev->adv_instances))
7348 disable_advertising(&req);
7350 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
7351 * flag is set or the device isn't powered then we have no HCI
7352 * communication to make. Simply return.
7354 if (skb_queue_empty(&req.cmd_q) ||
7355 !hdev_is_powered(hdev) ||
7356 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7357 rp.instance = cp->instance;
7358 err = mgmt_cmd_complete(sk, hdev->id,
7359 MGMT_OP_REMOVE_ADVERTISING,
7360 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7364 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7371 err = hci_req_run(&req, remove_advertising_complete);
7373 mgmt_pending_remove(cmd);
7376 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode. Each entry is
 * { handler, expected parameter size, flags } — flags such as
 * HCI_MGMT_VAR_LEN / HCI_MGMT_UNTRUSTED / HCI_MGMT_UNCONFIGURED qualify how
 * hci_sock dispatches the command.
 * NOTE(review): mangled extraction — several flag continuation lines (e.g.
 * HCI_MGMT_VAR_LEN after load_link_keys) and the closing "};" are elided.
 * Comments only; entries left byte-identical.
 */
7381 static const struct hci_mgmt_handler mgmt_handlers[] = {
7382 { NULL }, /* 0x0000 (no command) */
7383 { read_version, MGMT_READ_VERSION_SIZE,
7385 HCI_MGMT_UNTRUSTED },
7386 { read_commands, MGMT_READ_COMMANDS_SIZE,
7388 HCI_MGMT_UNTRUSTED },
7389 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
7391 HCI_MGMT_UNTRUSTED },
7392 { read_controller_info, MGMT_READ_INFO_SIZE,
7393 HCI_MGMT_UNTRUSTED },
7394 { set_powered, MGMT_SETTING_SIZE },
7395 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
7396 { set_connectable, MGMT_SETTING_SIZE },
7397 { set_fast_connectable, MGMT_SETTING_SIZE },
7398 { set_bondable, MGMT_SETTING_SIZE },
7399 { set_link_security, MGMT_SETTING_SIZE },
7400 { set_ssp, MGMT_SETTING_SIZE },
7401 { set_hs, MGMT_SETTING_SIZE },
7402 { set_le, MGMT_SETTING_SIZE },
7403 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
7404 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
7405 { add_uuid, MGMT_ADD_UUID_SIZE },
7406 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
7407 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
7409 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
7411 { disconnect, MGMT_DISCONNECT_SIZE },
7412 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
7413 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
7414 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
7415 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
7416 { pair_device, MGMT_PAIR_DEVICE_SIZE },
7417 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
7418 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
7419 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
7420 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
7421 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
7422 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
7423 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
7424 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
7426 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
7427 { start_discovery, MGMT_START_DISCOVERY_SIZE },
7428 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
7429 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
7430 { block_device, MGMT_BLOCK_DEVICE_SIZE },
7431 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
7432 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
7433 { set_advertising, MGMT_SETTING_SIZE },
7434 { set_bredr, MGMT_SETTING_SIZE },
7435 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
7436 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
7437 { set_secure_conn, MGMT_SETTING_SIZE },
7438 { set_debug_keys, MGMT_SETTING_SIZE },
7439 { set_privacy, MGMT_SET_PRIVACY_SIZE },
7440 { load_irks, MGMT_LOAD_IRKS_SIZE,
7442 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
7443 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
7444 { add_device, MGMT_ADD_DEVICE_SIZE },
7445 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
7446 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
7448 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
7450 HCI_MGMT_UNTRUSTED },
7451 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
7452 HCI_MGMT_UNCONFIGURED |
7453 HCI_MGMT_UNTRUSTED },
7454 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
7455 HCI_MGMT_UNCONFIGURED },
7456 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
7457 HCI_MGMT_UNCONFIGURED },
7458 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
7460 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7461 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
7463 HCI_MGMT_UNTRUSTED },
7464 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
7465 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7467 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Broadcast Index Added (and Extended Index Added) events when a new
 * controller is registered, unless it is a raw-only device.
 * NOTE(review): mangled extraction — the switch case labels for
 * hdev->dev_type, the ev.type/ev.bus assignments and braces are elided.
 * Comments only.
 */
7470 void mgmt_index_added(struct hci_dev *hdev)
7472 struct mgmt_ev_ext_index ev;
/* Raw devices are invisible to the mgmt interface. */
7474 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7477 switch (hdev->dev_type) {
/* Unconfigured controllers get the UNCONF variant of the event. */
7479 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7480 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7481 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7484 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7485 HCI_MGMT_INDEX_EVENTS);
7498 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7499 HCI_MGMT_EXT_INDEX_EVENTS);
/* Broadcast Index Removed events when a controller goes away, after failing
 * all still-pending mgmt commands with INVALID_INDEX.
 * NOTE(review): mangled extraction — switch case labels, ev.type/ev.bus
 * assignments and braces are elided. Comments only.
 */
7502 void mgmt_index_removed(struct hci_dev *hdev)
7504 struct mgmt_ev_ext_index ev;
7505 u8 status = MGMT_STATUS_INVALID_INDEX;
7507 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7510 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command. */
7512 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7514 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7515 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7516 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7519 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7520 HCI_MGMT_INDEX_EVENTS);
7533 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7534 HCI_MGMT_EXT_INDEX_EVENTS);
7537 /* This function requires the caller holds hdev->lock */
7538 static void restart_le_actions(struct hci_request *req)
7540 struct hci_dev *hdev = req->hdev;
7541 struct hci_conn_params *p;
7543 list_for_each_entry(p, &hdev->le_conn_params, list) {
7544 /* Needed for AUTO_OFF case where might not "really"
7545 * have been powered off.
7547 list_del_init(&p->action);
7549 switch (p->auto_connect) {
7550 case HCI_AUTO_CONN_DIRECT:
7551 case HCI_AUTO_CONN_ALWAYS:
7552 list_add(&p->action, &hdev->pend_le_conns);
7554 case HCI_AUTO_CONN_REPORT:
7555 list_add(&p->action, &hdev->pend_le_reports);
7562 __hci_update_background_scan(req);
/* Request-completion callback for the power-on HCI request: answers all
 * pending Set Powered commands and emits New Settings.
 * NOTE(review): mangled extraction — hci_dev_lock, the smp_register() call
 * the comment below refers to, and the trailing sock_put(match.sk) are
 * elided. Comments only.
 */
7565 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7567 struct cmd_lookup match = { NULL, hdev };
7569 BT_DBG("status 0x%02x", status);
7572 /* Register the available SMP channels (BR/EDR and LE) only
7573 * when successfully powering on the controller. This late
7574 * registration is required so that LE SMP can clearly
7575 * decide if the public address or static address is used.
7582 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7584 new_settings(hdev, match.sk);
7586 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings controller state in sync with
 * the mgmt settings after power-on (SSP, SC, LE host support, advertising
 * data/instances, auth enable, page scan). Returns hci_req_run()'s result.
 * NOTE(review): mangled extraction — local declarations (u8 link_sec, mode,
 * support), braces and several statements are elided. Comments only.
 */
7592 static int powered_update_hci(struct hci_dev *hdev)
7594 struct hci_request req;
7595 struct adv_info *adv_instance;
7598 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt has it enabled but the host
 * feature bit is not yet set.
 */
7600 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7601 !lmp_host_ssp_capable(hdev)) {
7604 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7606 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7609 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7610 sizeof(support), &support);
7614 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7615 lmp_bredr_capable(hdev)) {
7616 struct hci_cp_write_le_host_supported cp;
7621 /* Check first if we already have the right
7622 * host state (host features set)
7624 if (cp.le != lmp_host_le_capable(hdev) ||
7625 cp.simul != lmp_host_le_br_capable(hdev))
7626 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7630 if (lmp_le_capable(hdev)) {
7631 /* Make sure the controller has a good default for
7632 * advertising data. This also applies to the case
7633 * where BR/EDR was toggled during the AUTO_OFF phase.
7635 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7636 (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7637 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))) {
7638 update_adv_data(&req);
7639 update_scan_rsp_data(&req);
/* If instances exist but none is current, pick the first one. */
7642 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
7643 hdev->cur_adv_instance == 0x00 &&
7644 !list_empty(&hdev->adv_instances)) {
7645 adv_instance = list_first_entry(&hdev->adv_instances,
7646 struct adv_info, list);
7647 hdev->cur_adv_instance = adv_instance->instance;
7650 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7651 enable_advertising(&req);
7652 else if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
7653 hdev->cur_adv_instance)
7654 schedule_adv_instance(&req, hdev->cur_adv_instance,
7657 restart_le_actions(&req);
/* Sync the controller's auth-enable state with the mgmt setting. */
7660 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7661 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7662 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7663 sizeof(link_sec), &link_sec);
7665 if (lmp_bredr_capable(hdev)) {
7666 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7667 write_fast_connectable(&req, true);
7669 write_fast_connectable(&req, false);
7670 __hci_update_page_scan(&req);
7676 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a controller power state change. On power-on the HCI sync
 * request is kicked off (its completion answers the pending commands); on
 * power-off all pending commands are failed and settings are broadcast.
 * NOTE(review): mangled extraction — the power-on/power-off branch
 * structure, hci_dev_lock/unlock and the return paths are elided.
 * Comments only.
 */
7679 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7681 struct cmd_lookup match = { NULL, hdev };
7682 u8 status, zero_cod[] = { 0, 0, 0 };
7685 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* Power-on: if the sync request was queued, completion handles replies. */
7689 if (powered_update_hci(hdev) == 0)
7692 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off path below. */
7697 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7699 /* If the power off is because of hdev unregistration let
7700 * use the appropriate INVALID_INDEX status. Otherwise use
7701 * NOT_POWERED. We cover both scenarios here since later in
7702 * mgmt_index_removed() any hci_conn callbacks will have already
7703 * been triggered, potentially causing misleading DISCONNECTED
7706 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7707 status = MGMT_STATUS_INVALID_INDEX;
7709 status = MGMT_STATUS_NOT_POWERED;
7711 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Announce the class of device reset if it was non-zero. */
7713 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7714 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7715 zero_cod, sizeof(zero_cod), NULL);
7718 err = new_settings(hdev, match.sk);
7726 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7728 struct mgmt_pending_cmd *cmd;
7731 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7735 if (err == -ERFKILL)
7736 status = MGMT_STATUS_RFKILLED;
7738 status = MGMT_STATUS_FAILED;
7740 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7742 mgmt_pending_remove(cmd);
7745 void mgmt_discoverable_timeout(struct hci_dev *hdev)
7747 struct hci_request req;
7751 /* When discoverable timeout triggers, then just make sure
7752 * the limited discoverable flag is cleared. Even in the case
7753 * of a timeout triggered from general discoverable, it is
7754 * safe to unconditionally clear the flag.
7756 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7757 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7759 hci_req_init(&req, hdev);
7760 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
7761 u8 scan = SCAN_PAGE;
7762 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7763 sizeof(scan), &scan);
7767 /* Advertising instances don't use the global discoverable setting, so
7768 * only update AD if advertising was enabled using Set Advertising.
7770 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7771 update_adv_data(&req);
7773 hci_req_run(&req, NULL);
7775 hdev->discov_timeout = 0;
7777 new_settings(hdev, NULL);
7779 hci_dev_unlock(hdev);
7782 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7785 struct mgmt_ev_new_link_key ev;
7787 memset(&ev, 0, sizeof(ev));
7789 ev.store_hint = persistent;
7790 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7791 ev.key.addr.type = BDADDR_BREDR;
7792 ev.key.type = key->type;
7793 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7794 ev.key.pin_len = key->pin_len;
7796 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type to the mgmt API key-type constant.
 * NOTE(review): mangled extraction — the switch case labels (the legacy
 * SMP_LTK cases and the P256 case) and closing braces are elided; only the
 * case bodies remain. Comments only.
 */
7799 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7801 switch (ltk->type) {
/* legacy LTK cases (labels elided) */
7804 if (ltk->authenticated)
7805 return MGMT_LTK_AUTHENTICATED;
7806 return MGMT_LTK_UNAUTHENTICATED;
/* P-256 case (label elided) */
7808 if (ltk->authenticated)
7809 return MGMT_LTK_P256_AUTH;
7810 return MGMT_LTK_P256_UNAUTH;
7811 case SMP_LTK_P256_DEBUG:
7812 return MGMT_LTK_P256_DEBUG;
/* Fallback for unrecognized types. */
7815 return MGMT_LTK_UNAUTHENTICATED;
7818 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7820 struct mgmt_ev_new_long_term_key ev;
7822 memset(&ev, 0, sizeof(ev));
7824 /* Devices using resolvable or non-resolvable random addresses
7825 * without providing an identity resolving key don't require
7826 * to store long term keys. Their addresses will change the
7829 * Only when a remote device provides an identity address
7830 * make sure the long term key is stored. If the remote
7831 * identity is known, the long term keys are internally
7832 * mapped to the identity address. So allow static random
7833 * and public addresses here.
7835 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7836 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7837 ev.store_hint = 0x00;
7839 ev.store_hint = persistent;
7841 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7842 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7843 ev.key.type = mgmt_ltk_type(key);
7844 ev.key.enc_size = key->enc_size;
7845 ev.key.ediv = key->ediv;
7846 ev.key.rand = key->rand;
7848 if (key->type == SMP_LTK)
7851 /* Make sure we copy only the significant bytes based on the
7852 * encryption key size, and set the rest of the value to zeroes.
7854 memcpy(ev.key.val, key->val, key->enc_size);
7855 memset(ev.key.val + key->enc_size, 0,
7856 sizeof(ev.key.val) - key->enc_size);
7858 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
7861 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7863 struct mgmt_ev_new_irk ev;
7865 memset(&ev, 0, sizeof(ev));
7867 ev.store_hint = persistent;
7869 bacpy(&ev.rpa, &irk->rpa);
7870 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7871 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7872 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7874 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7877 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7880 struct mgmt_ev_new_csrk ev;
7882 memset(&ev, 0, sizeof(ev));
7884 /* Devices using resolvable or non-resolvable random addresses
7885 * without providing an identity resolving key don't require
7886 * to store signature resolving keys. Their addresses will change
7887 * the next time around.
7889 * Only when a remote device provides an identity address
7890 * make sure the signature resolving key is stored. So allow
7891 * static random and public addresses here.
7893 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7894 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7895 ev.store_hint = 0x00;
7897 ev.store_hint = persistent;
7899 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7900 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7901 ev.key.type = csrk->type;
7902 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7904 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
7907 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7908 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7909 u16 max_interval, u16 latency, u16 timeout)
7911 struct mgmt_ev_new_conn_param ev;
7913 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7916 memset(&ev, 0, sizeof(ev));
7917 bacpy(&ev.addr.bdaddr, bdaddr);
7918 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7919 ev.store_hint = store_hint;
7920 ev.min_interval = cpu_to_le16(min_interval);
7921 ev.max_interval = cpu_to_le16(max_interval);
7922 ev.latency = cpu_to_le16(latency);
7923 ev.timeout = cpu_to_le16(timeout);
7925 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7928 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7929 u32 flags, u8 *name, u8 name_len)
7932 struct mgmt_ev_device_connected *ev = (void *) buf;
7935 bacpy(&ev->addr.bdaddr, &conn->dst);
7936 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7938 ev->flags = __cpu_to_le32(flags);
7940 /* We must ensure that the EIR Data fields are ordered and
7941 * unique. Keep it simple for now and avoid the problem by not
7942 * adding any BR/EDR data to the LE adv.
7944 if (conn->le_adv_data_len > 0) {
7945 memcpy(&ev->eir[eir_len],
7946 conn->le_adv_data, conn->le_adv_data_len);
7947 eir_len = conn->le_adv_data_len;
7950 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
7953 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7954 eir_len = eir_append_data(ev->eir, eir_len,
7956 conn->dev_class, 3);
7959 ev->eir_len = cpu_to_le16(eir_len);
7961 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7962 sizeof(*ev) + eir_len, NULL);
7965 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7967 struct sock **sk = data;
7969 cmd->cmd_complete(cmd, 0);
7974 mgmt_pending_remove(cmd);
7977 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7979 struct hci_dev *hdev = data;
7980 struct mgmt_cp_unpair_device *cp = cmd->param;
7982 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7984 cmd->cmd_complete(cmd, 0);
7985 mgmt_pending_remove(cmd);
7988 bool mgmt_powering_down(struct hci_dev *hdev)
7990 struct mgmt_pending_cmd *cmd;
7991 struct mgmt_mode *cp;
7993 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8004 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
8005 u8 link_type, u8 addr_type, u8 reason,
8006 bool mgmt_connected)
8008 struct mgmt_ev_device_disconnected ev;
8009 struct sock *sk = NULL;
8011 /* The connection is still in hci_conn_hash so test for 1
8012 * instead of 0 to know if this is the last one.
8014 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8015 cancel_delayed_work(&hdev->power_off);
8016 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8019 if (!mgmt_connected)
8022 if (link_type != ACL_LINK && link_type != LE_LINK)
8025 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
8027 bacpy(&ev.addr.bdaddr, bdaddr);
8028 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8031 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
8036 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8040 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8041 u8 link_type, u8 addr_type, u8 status)
8043 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8044 struct mgmt_cp_disconnect *cp;
8045 struct mgmt_pending_cmd *cmd;
8047 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8050 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8056 if (bacmp(bdaddr, &cp->addr.bdaddr))
8059 if (cp->addr.type != bdaddr_type)
8062 cmd->cmd_complete(cmd, mgmt_status(status));
8063 mgmt_pending_remove(cmd);
8066 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8067 u8 addr_type, u8 status)
8069 struct mgmt_ev_connect_failed ev;
8071 /* The connection is still in hci_conn_hash so test for 1
8072 * instead of 0 to know if this is the last one.
8074 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8075 cancel_delayed_work(&hdev->power_off);
8076 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8079 bacpy(&ev.addr.bdaddr, bdaddr);
8080 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8081 ev.status = mgmt_status(status);
8083 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
8086 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8088 struct mgmt_ev_pin_code_request ev;
8090 bacpy(&ev.addr.bdaddr, bdaddr);
8091 ev.addr.type = BDADDR_BREDR;
8094 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8097 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8100 struct mgmt_pending_cmd *cmd;
8102 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8106 cmd->cmd_complete(cmd, mgmt_status(status));
8107 mgmt_pending_remove(cmd);
8110 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8113 struct mgmt_pending_cmd *cmd;
8115 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8119 cmd->cmd_complete(cmd, mgmt_status(status));
8120 mgmt_pending_remove(cmd);
8123 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8124 u8 link_type, u8 addr_type, u32 value,
8127 struct mgmt_ev_user_confirm_request ev;
8129 BT_DBG("%s", hdev->name);
8131 bacpy(&ev.addr.bdaddr, bdaddr);
8132 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8133 ev.confirm_hint = confirm_hint;
8134 ev.value = cpu_to_le32(value);
8136 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8140 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8141 u8 link_type, u8 addr_type)
8143 struct mgmt_ev_user_passkey_request ev;
8145 BT_DBG("%s", hdev->name);
8147 bacpy(&ev.addr.bdaddr, bdaddr);
8148 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8150 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8154 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8155 u8 link_type, u8 addr_type, u8 status,
8158 struct mgmt_pending_cmd *cmd;
8160 cmd = pending_find(opcode, hdev);
8164 cmd->cmd_complete(cmd, mgmt_status(status));
8165 mgmt_pending_remove(cmd);
8170 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8171 u8 link_type, u8 addr_type, u8 status)
8173 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8174 status, MGMT_OP_USER_CONFIRM_REPLY);
8177 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8178 u8 link_type, u8 addr_type, u8 status)
8180 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8182 MGMT_OP_USER_CONFIRM_NEG_REPLY);
8185 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8186 u8 link_type, u8 addr_type, u8 status)
8188 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8189 status, MGMT_OP_USER_PASSKEY_REPLY);
8192 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8193 u8 link_type, u8 addr_type, u8 status)
8195 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8197 MGMT_OP_USER_PASSKEY_NEG_REPLY);
8200 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8201 u8 link_type, u8 addr_type, u32 passkey,
8204 struct mgmt_ev_passkey_notify ev;
8206 BT_DBG("%s", hdev->name);
8208 bacpy(&ev.addr.bdaddr, bdaddr);
8209 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8210 ev.passkey = __cpu_to_le32(passkey);
8211 ev.entered = entered;
8213 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
8216 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
8218 struct mgmt_ev_auth_failed ev;
8219 struct mgmt_pending_cmd *cmd;
8220 u8 status = mgmt_status(hci_status);
8222 bacpy(&ev.addr.bdaddr, &conn->dst);
8223 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8226 cmd = find_pairing(conn);
8228 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
8229 cmd ? cmd->sk : NULL);
8232 cmd->cmd_complete(cmd, status);
8233 mgmt_pending_remove(cmd);
8237 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
8239 struct cmd_lookup match = { NULL, hdev };
8243 u8 mgmt_err = mgmt_status(status);
8244 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
8245 cmd_status_rsp, &mgmt_err);
8249 if (test_bit(HCI_AUTH, &hdev->flags))
8250 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
8252 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
8254 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
8258 new_settings(hdev, match.sk);
8264 static void clear_eir(struct hci_request *req)
8266 struct hci_dev *hdev = req->hdev;
8267 struct hci_cp_write_eir cp;
8269 if (!lmp_ext_inq_capable(hdev))
8272 memset(hdev->eir, 0, sizeof(hdev->eir));
8274 memset(&cp, 0, sizeof(cp));
8276 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of HCI Write SSP Mode: sync the HCI_SSP_ENABLED (and dependent
 * HCI_HS_ENABLED) flags, answer pending Set SSP commands, broadcast New
 * Settings on change, and update SSP debug mode / EIR accordingly.
 * NOTE(review): mangled extraction — the status-error branch structure,
 * the else branches of the flag updates, sock_put and the clear_eir() else
 * path are elided. Comments only.
 */
8279 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
8281 struct cmd_lookup match = { NULL, hdev };
8282 struct hci_request req;
8283 bool changed = false;
/* Error path: roll back flags and fail pending commands. */
8286 u8 mgmt_err = mgmt_status(status);
8288 if (enable && hci_dev_test_and_clear_flag(hdev,
8290 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8291 new_settings(hdev, NULL);
8294 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
/* Success path: track whether the setting actually changed. */
8300 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
8302 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
8304 changed = hci_dev_test_and_clear_flag(hdev,
8307 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8310 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
8313 new_settings(hdev, match.sk);
8318 hci_req_init(&req, hdev);
8320 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8321 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
8322 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
8323 sizeof(enable), &enable);
8329 hci_req_run(&req, NULL);
8332 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8334 struct cmd_lookup *match = data;
8336 if (match->sk == NULL) {
8337 match->sk = cmd->sk;
8338 sock_hold(match->sk);
8342 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
8345 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
8347 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
8348 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
8349 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
8352 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8353 dev_class, 3, NULL);
8359 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
8361 struct mgmt_cp_set_local_name ev;
8362 struct mgmt_pending_cmd *cmd;
8367 memset(&ev, 0, sizeof(ev));
8368 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
8369 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
8371 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
8373 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
8375 /* If this is a HCI command related to powering on the
8376 * HCI dev don't send any mgmt signals.
8378 if (pending_find(MGMT_OP_SET_POWERED, hdev))
8382 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
8383 cmd ? cmd->sk : NULL);
8386 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8390 for (i = 0; i < uuid_count; i++) {
8391 if (!memcmp(uuid, uuids[i], 16))
8398 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
8402 while (parsed < eir_len) {
8403 u8 field_len = eir[0];
8410 if (eir_len - parsed < field_len + 1)
8414 case EIR_UUID16_ALL:
8415 case EIR_UUID16_SOME:
8416 for (i = 0; i + 3 <= field_len; i += 2) {
8417 memcpy(uuid, bluetooth_base_uuid, 16);
8418 uuid[13] = eir[i + 3];
8419 uuid[12] = eir[i + 2];
8420 if (has_uuid(uuid, uuid_count, uuids))
8424 case EIR_UUID32_ALL:
8425 case EIR_UUID32_SOME:
8426 for (i = 0; i + 5 <= field_len; i += 4) {
8427 memcpy(uuid, bluetooth_base_uuid, 16);
8428 uuid[15] = eir[i + 5];
8429 uuid[14] = eir[i + 4];
8430 uuid[13] = eir[i + 3];
8431 uuid[12] = eir[i + 2];
8432 if (has_uuid(uuid, uuid_count, uuids))
8436 case EIR_UUID128_ALL:
8437 case EIR_UUID128_SOME:
8438 for (i = 0; i + 17 <= field_len; i += 16) {
8439 memcpy(uuid, eir + i + 2, 16);
8440 if (has_uuid(uuid, uuid_count, uuids))
8446 parsed += field_len + 1;
8447 eir += field_len + 1;
8453 static void restart_le_scan(struct hci_dev *hdev)
8455 /* If controller is not scanning we are done. */
8456 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8459 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8460 hdev->discovery.scan_start +
8461 hdev->discovery.scan_duration))
8464 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
8465 DISCOV_LE_RESTART_DELAY);
8468 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8469 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8471 /* If a RSSI threshold has been specified, and
8472 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8473 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8474 * is set, let it through for further processing, as we might need to
8477 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8478 * the results are also dropped.
8480 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8481 (rssi == HCI_RSSI_INVALID ||
8482 (rssi < hdev->discovery.rssi &&
8483 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8486 if (hdev->discovery.uuid_count != 0) {
8487 /* If a list of UUIDs is provided in filter, results with no
8488 * matching UUID should be dropped.
8490 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8491 hdev->discovery.uuids) &&
8492 !eir_has_uuids(scan_rsp, scan_rsp_len,
8493 hdev->discovery.uuid_count,
8494 hdev->discovery.uuids))
8498 /* If duplicate filtering does not report RSSI changes, then restart
8499 * scanning to ensure updated result with updated RSSI values.
8501 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8502 restart_le_scan(hdev);
8504 /* Validate RSSI value against the RSSI threshold once more. */
8505 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8506 rssi < hdev->discovery.rssi)
8513 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8514 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8515 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8518 struct mgmt_ev_device_found *ev = (void *)buf;
8521 /* Don't send events for a non-kernel initiated discovery. With
8522 * LE one exception is if we have pend_le_reports > 0 in which
8523 * case we're doing passive scanning and want these events.
8525 if (!hci_discovery_active(hdev)) {
8526 if (link_type == ACL_LINK)
8528 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
8532 if (hdev->discovery.result_filtering) {
8533 /* We are using service discovery */
8534 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8539 /* Make sure that the buffer is big enough. The 5 extra bytes
8540 * are for the potential CoD field.
8542 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8545 memset(buf, 0, sizeof(buf));
8547 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8548 * RSSI value was reported as 0 when not available. This behavior
8549 * is kept when using device discovery. This is required for full
8550 * backwards compatibility with the API.
8552 * However when using service discovery, the value 127 will be
8553 * returned when the RSSI is not available.
8555 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8556 link_type == ACL_LINK)
8559 bacpy(&ev->addr.bdaddr, bdaddr);
8560 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8562 ev->flags = cpu_to_le32(flags);
8565 /* Copy EIR or advertising data into event */
8566 memcpy(ev->eir, eir, eir_len);
8568 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
8569 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8572 if (scan_rsp_len > 0)
8573 /* Append scan response data to event */
8574 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8576 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8577 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8579 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
8582 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8583 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8585 struct mgmt_ev_device_found *ev;
8586 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8589 ev = (struct mgmt_ev_device_found *) buf;
8591 memset(buf, 0, sizeof(buf));
8593 bacpy(&ev->addr.bdaddr, bdaddr);
8594 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8597 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8600 ev->eir_len = cpu_to_le16(eir_len);
8602 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8605 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8607 struct mgmt_ev_discovering ev;
8609 BT_DBG("%s discovering %u", hdev->name, discovering);
8611 memset(&ev, 0, sizeof(ev));
8612 ev.type = hdev->discovery.type;
8613 ev.discovering = discovering;
8615 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8618 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8620 BT_DBG("%s status %u", hdev->name, status);
8623 void mgmt_reenable_advertising(struct hci_dev *hdev)
8625 struct hci_request req;
8628 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8629 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8632 instance = get_current_adv_instance(hdev);
8634 hci_req_init(&req, hdev);
8637 schedule_adv_instance(&req, instance, true);
8639 update_adv_data(&req);
8640 update_scan_rsp_data(&req);
8641 enable_advertising(&req);
8644 hci_req_run(&req, adv_enable_complete);
8647 static struct hci_mgmt_chan chan = {
8648 .channel = HCI_CHANNEL_CONTROL,
8649 .handler_count = ARRAY_SIZE(mgmt_handlers),
8650 .handlers = mgmt_handlers,
8651 .hdev_init = mgmt_init_hdev,
8656 return hci_mgmt_chan_register(&chan);
8659 void mgmt_exit(void)
8661 hci_mgmt_chan_unregister(&chan);