Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
Johan Hedberg says:

====================
pull request: bluetooth-next 2015-04-04

Here's what's probably the last bluetooth-next pull request for 4.1:

 - Fixes for LE advertising data & advertising parameters
 - Fix for race condition with HCI_RESET flag
 - New BNEPGETSUPPFEAT ioctl, needed for certification
 - New HCI request callback type to get the resulting skb
 - Cleanups to use BIT() macro wherever possible
 - Consolidate Broadcom device entries in the btusb HCI driver
 - Check for valid flags in CMTP, HIDP & BNEP
 - Disallow local privacy & OOB data combo to prevent a potential race
 - Expose SMP & ECDH selftest results through debugfs
 - Expose current Device ID info through debugfs

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
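As a quick illustration of the new BNEPGETSUPPFEAT ioctl listed above, a userspace tool could query the kernel's supported BNEP features roughly as follows. This is a minimal sketch, not part of the pull request: the ioctl number and the "setup response" feature bit match the definitions added below, but the fallback defines are only there in case the installed libbluetooth headers predate this series.

/* Sketch: query BNEP supported features from userspace (assumes a
 * kernel with this series; bit 0 = setup-response support).
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

#ifndef BTPROTO_BNEP
#define BTPROTO_BNEP 4
#endif
#ifndef BNEPGETSUPPFEAT
#define BNEPGETSUPPFEAT _IOR('B', 212, int)
#endif

int main(void)
{
	uint32_t feat = 0;
	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_BNEP);

	if (sk < 0) {
		perror("socket");
		return 1;
	}

	if (ioctl(sk, BNEPGETSUPPFEAT, &feat) < 0)
		perror("BNEPGETSUPPFEAT");	/* older kernels: -EINVAL */
	else
		printf("BNEP features: 0x%08x (setup response %ssupported)\n",
		       feat, (feat & (1 << 0)) ? "" : "not ");

	return 0;
}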
commit 7abccdba25
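The diff below also introduces the skb-returning request callback (hci_req_complete_skb_t and hci_req_run_skb). For orientation, a kernel-side caller would use it roughly like this; an illustrative sketch based on the hunks below (mgmt.c's read_local_oob_data follows the same pattern), not a complete driver:

/* Sketch: run an HCI request and receive the resulting event skb
 * through the new skb-aware completion callback.
 */
static void example_complete_skb(struct hci_dev *hdev, u8 status,
				 u16 opcode, struct sk_buff *skb)
{
	BT_DBG("%s opcode 0x%4.4x status 0x%2.2x", hdev->name, opcode, status);

	/* skb may be NULL on failure; otherwise it holds the matching
	 * command-complete (or requested) event payload.
	 */
}

static int example_send(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	/* hci_req_run_skb() routes the final event to the skb callback */
	return hci_req_run_skb(&req, example_complete_skb);
}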
@@ -111,13 +111,7 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) },

/* Broadcom BCM20702A0 */
{ USB_DEVICE(0x0489, 0xe042) },
{ USB_DEVICE(0x04ca, 0x2003) },
{ USB_DEVICE(0x0b05, 0x17b5) },
{ USB_DEVICE(0x0b05, 0x17cb) },
{ USB_DEVICE(0x413c, 0x8197) },
{ USB_DEVICE(0x13d3, 0x3404),
  .driver_info = BTUSB_BCM_PATCHRAM },

/* Broadcom BCM20702B0 (Dynex/Insignia) */
{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
@@ -139,10 +133,12 @@ static const struct usb_device_id btusb_table[] = {
  .driver_info = BTUSB_BCM_PATCHRAM },

/* Belkin F8065bf - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01),
  .driver_info = BTUSB_BCM_PATCHRAM },

/* IMC Networks - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
  .driver_info = BTUSB_BCM_PATCHRAM },

/* Intel Bluetooth USB Bootloader (RAM module) */
{ USB_DEVICE(0x8087, 0x0a5a),
@@ -499,7 +499,7 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
BIT(HCI_UART_INIT_PENDING) |
BIT(HCI_UART_EXT_CONFIG);

if ((flags & ~valid_flags))
if (flags & ~valid_flags)
return -EINVAL;

hu->hdev_flags = flags;
@@ -570,6 +570,7 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp)
#define BNEPCONNDEL _IOW('B', 201, int)
#define BNEPGETCONNLIST _IOR('B', 210, int)
#define BNEPGETCONNINFO _IOR('B', 211, int)
#define BNEPGETSUPPFEAT _IOR('B', 212, int)

#define CMTPCONNADD _IOW('C', 200, int)
#define CMTPCONNDEL _IOW('C', 201, int)
@@ -1247,6 +1248,7 @@ COMPATIBLE_IOCTL(BNEPCONNADD)
COMPATIBLE_IOCTL(BNEPCONNDEL)
COMPATIBLE_IOCTL(BNEPGETCONNLIST)
COMPATIBLE_IOCTL(BNEPGETCONNINFO)
COMPATIBLE_IOCTL(BNEPGETSUPPFEAT)
COMPATIBLE_IOCTL(CMTPCONNADD)
COMPATIBLE_IOCTL(CMTPCONNDEL)
COMPATIBLE_IOCTL(CMTPGETCONNLIST)
@@ -269,11 +269,23 @@ struct l2cap_ctrl {
__u16 reqseq;
__u16 txseq;
__u8 retries;
__le16 psm;
bdaddr_t bdaddr;
struct l2cap_chan *chan;
};

struct hci_dev;

typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb);

struct req_ctrl {
bool start;
u8 event;
hci_req_complete_t complete;
hci_req_complete_skb_t complete_skb;
};

struct bt_skb_cb {
__u8 pkt_type;
@@ -281,13 +293,10 @@ struct bt_skb_cb {
__u16 opcode;
__u16 expect;
__u8 incoming:1;
__u8 req_start:1;
u8 req_event;
hci_req_complete_t req_complete;
struct l2cap_chan *chan;
struct l2cap_ctrl control;
bdaddr_t bdaddr;
__le16 psm;
union {
struct l2cap_ctrl l2cap;
struct req_ctrl req;
};
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -326,7 +326,6 @@ struct hci_dev {
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;

struct sk_buff *recv_evt;
struct sk_buff *sent_cmd;
struct sk_buff *reassembly[NUM_REASSEMBLY];

@@ -334,6 +333,7 @@ struct hci_dev {
wait_queue_head_t req_wait_q;
__u32 req_status;
__u32 req_result;
struct sk_buff *req_skb;

void *smp_data;
void *smp_bredr_data;
@@ -1284,8 +1284,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

bool hci_req_pending(struct hci_dev *hdev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1393,9 +1391,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
@@ -111,6 +111,10 @@ struct bnep_ext_hdr {
#define BNEPCONNDEL _IOW('B', 201, int)
#define BNEPGETCONNLIST _IOR('B', 210, int)
#define BNEPGETCONNINFO _IOR('B', 211, int)
#define BNEPGETSUPPFEAT _IOR('B', 212, int)

#define BNEP_SETUP_RESPONSE 0
#define BNEP_SETUP_RSP_SENT 10

struct bnep_connadd_req {
int sock; /* Connected socket */
@@ -231,7 +231,14 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
break;

case BNEP_SETUP_CONN_REQ:
err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_CONN_NOT_ALLOWED);
/* Successful response should be sent only once */
if (test_bit(BNEP_SETUP_RESPONSE, &s->flags) &&
    !test_and_set_bit(BNEP_SETUP_RSP_SENT, &s->flags))
	err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
			    BNEP_SUCCESS);
else
	err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
			    BNEP_CONN_NOT_ALLOWED);
break;

default: {
@@ -239,7 +246,7 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
pkt[0] = BNEP_CONTROL;
pkt[1] = BNEP_CMD_NOT_UNDERSTOOD;
pkt[2] = cmd;
bnep_send(s, pkt, sizeof(pkt));
err = bnep_send(s, pkt, sizeof(pkt));
}
break;
}
@@ -292,30 +299,56 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct net_device *dev = s->dev;
struct sk_buff *nskb;
u8 type;
u8 type, ctrl_type;

dev->stats.rx_bytes += skb->len;

type = *(u8 *) skb->data;
skb_pull(skb, 1);
ctrl_type = *(u8 *)skb->data;

if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
	goto badframe;

if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
bnep_rx_control(s, skb->data, skb->len);
kfree_skb(skb);
return 0;
if (bnep_rx_control(s, skb->data, skb->len) < 0) {
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return 0;
}

if (!(type & BNEP_EXT_HEADER)) {
	kfree_skb(skb);
	return 0;
}

/* Verify and pull ctrl message since it's already processed */
switch (ctrl_type) {
case BNEP_SETUP_CONN_REQ:
	/* Pull: ctrl type (1 b), len (1 b), data (len bytes) */
	if (!skb_pull(skb, 2 + *(u8 *)(skb->data + 1) * 2))
		goto badframe;
	break;
case BNEP_FILTER_MULTI_ADDR_SET:
case BNEP_FILTER_NET_TYPE_SET:
	/* Pull: ctrl type (1 b), len (2 b), data (len bytes) */
	if (!skb_pull(skb, 3 + *(u16 *)(skb->data + 1) * 2))
		goto badframe;
	break;
default:
	kfree_skb(skb);
	return 0;
}
} else {
skb_reset_mac_header(skb);

/* Verify and pull out header */
if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
	goto badframe;

s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
}

skb_reset_mac_header(skb);

/* Verify and pull out header */
if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
	goto badframe;

s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));

if (type & BNEP_EXT_HEADER) {
if (bnep_rx_extension(s, skb) < 0)
	goto badframe;
@@ -525,6 +558,7 @@ static struct device_type bnep_type = {

int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
{
u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);
struct net_device *dev;
struct bnep_session *s, *ss;
u8 dst[ETH_ALEN], src[ETH_ALEN];
@@ -535,6 +569,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
if (!l2cap_is_socket(sock))
	return -EBADFD;

if (req->flags & ~valid_flags)
	return -EINVAL;

baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);

@@ -566,6 +603,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
s->sock = sock;
s->role = req->role;
s->state = BT_CONNECTED;
s->flags = req->flags;

s->msg.msg_flags = MSG_NOSIGNAL;

@@ -611,11 +649,15 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)

int bnep_del_connection(struct bnep_conndel_req *req)
{
u32 valid_flags = 0;
struct bnep_session *s;
int err = 0;

BT_DBG("");

if (req->flags & ~valid_flags)
	return -EINVAL;

down_read(&bnep_session_sem);

s = __bnep_get_session(req->dst);
@@ -631,10 +673,12 @@ int bnep_del_connection(struct bnep_conndel_req *req)

static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
{
u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);

memset(ci, 0, sizeof(*ci));
memcpy(ci->dst, s->eh.h_source, ETH_ALEN);
strcpy(ci->device, s->dev->name);
ci->flags = s->flags;
ci->flags = s->flags & valid_flags;
ci->state = s->state;
ci->role = s->role;
}
@@ -57,6 +57,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
struct bnep_conninfo ci;
struct socket *nsock;
void __user *argp = (void __user *)arg;
__u32 supp_feat = BIT(BNEP_SETUP_RESPONSE);
int err;

BT_DBG("cmd %x arg %lx", cmd, arg);
@@ -120,6 +121,12 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long

return err;

case BNEPGETSUPPFEAT:
if (copy_to_user(argp, &supp_feat, sizeof(supp_feat)))
	return -EFAULT;

return 0;

default:
return -EINVAL;
}
@@ -333,7 +333,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
return;
}

if (session->flags & (1 << CMTP_LOOPBACK)) {
if (session->flags & BIT(CMTP_LOOPBACK)) {
kfree_skb(skb);
return;
}
@@ -75,10 +75,11 @@ static void __cmtp_unlink_session(struct cmtp_session *session)

static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
{
u32 valid_flags = BIT(CMTP_LOOPBACK);
memset(ci, 0, sizeof(*ci));
bacpy(&ci->bdaddr, &session->bdaddr);

ci->flags = session->flags;
ci->flags = session->flags & valid_flags;
ci->state = session->state;

ci->num = session->num;
@@ -313,7 +314,7 @@ static int cmtp_session(void *arg)

down_write(&cmtp_session_sem);

if (!(session->flags & (1 << CMTP_LOOPBACK)))
if (!(session->flags & BIT(CMTP_LOOPBACK)))
	cmtp_detach_device(session);

fput(session->sock->file);
@@ -329,6 +330,7 @@ static int cmtp_session(void *arg)

int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
{
u32 valid_flags = BIT(CMTP_LOOPBACK);
struct cmtp_session *session, *s;
int i, err;

@@ -337,6 +339,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
if (!l2cap_is_socket(sock))
	return -EBADFD;

if (req->flags & ~valid_flags)
	return -EINVAL;

session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
if (!session)
	return -ENOMEM;
@@ -385,7 +390,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
goto unlink;
}

if (!(session->flags & (1 << CMTP_LOOPBACK))) {
if (!(session->flags & BIT(CMTP_LOOPBACK))) {
err = cmtp_attach_device(session);
if (err < 0) {
	atomic_inc(&session->terminate);
@@ -409,11 +414,15 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)

int cmtp_del_connection(struct cmtp_conndel_req *req)
{
u32 valid_flags = 0;
struct cmtp_session *session;
int err = 0;

BT_DBG("");

if (req->flags & ~valid_flags)
	return -EINVAL;

down_read(&cmtp_session_sem);

session = __cmtp_get_session(&req->bdaddr);
@@ -141,13 +141,16 @@ static const struct file_operations dut_mode_fops = {

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
BT_DBG("%s result 0x%2.2x", hdev->name, result);

if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
if (skb)
	hdev->req_skb = skb_get(skb);
wake_up_interruptible(&hdev->req_wait_q);
}
}
@@ -163,66 +166,12 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
struct hci_ev_cmd_complete *ev;
struct hci_event_hdr *hdr;
struct sk_buff *skb;

hci_dev_lock(hdev);

skb = hdev->recv_evt;
hdev->recv_evt = NULL;

hci_dev_unlock(hdev);

if (!skb)
	return ERR_PTR(-ENODATA);

if (skb->len < sizeof(*hdr)) {
	BT_ERR("Too short HCI event");
	goto failed;
}

hdr = (void *) skb->data;
skb_pull(skb, HCI_EVENT_HDR_SIZE);

if (event) {
	if (hdr->evt != event)
		goto failed;
	return skb;
}

if (hdr->evt != HCI_EV_CMD_COMPLETE) {
	BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
	goto failed;
}

if (skb->len < sizeof(*ev)) {
	BT_ERR("Too short cmd_complete event");
	goto failed;
}

ev = (void *) skb->data;
skb_pull(skb, sizeof(*ev));

if (opcode == __le16_to_cpu(ev->opcode))
	return skb;

BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
       __le16_to_cpu(ev->opcode));

failed:
kfree_skb(skb);
return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
struct hci_request req;
struct sk_buff *skb;
int err = 0;

BT_DBG("%s", hdev->name);
@@ -236,7 +185,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);

err = hci_req_run(&req, hci_req_sync_complete);
err = hci_req_run_skb(&req, hci_req_sync_complete);
if (err < 0) {
remove_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_RUNNING);
@@ -265,13 +214,20 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
}

hdev->req_status = hdev->req_result = 0;
skb = hdev->req_skb;
hdev->req_skb = NULL;

BT_DBG("%s end: err %d", hdev->name, err);

if (err < 0)
if (err < 0) {
kfree_skb(skb);
return ERR_PTR(err);
}

return hci_get_cmd_complete(hdev, opcode, event);
if (!skb)
	return ERR_PTR(-ENODATA);

return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

@@ -303,7 +259,7 @@ static int __hci_req_sync(struct hci_dev *hdev,
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);

err = hci_req_run(&req, hci_req_sync_complete);
err = hci_req_run_skb(&req, hci_req_sync_complete);
if (err < 0) {
hdev->req_status = 0;

@@ -1690,9 +1646,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}

kfree_skb(hdev->recv_evt);
hdev->recv_evt = NULL;

/* After this point our queues are empty
 * and no tasks are scheduled. */
hdev->close(hdev);
@@ -3563,11 +3516,6 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
}
}

bool hci_req_pending(struct hci_dev *hdev)
{
return (hdev->req_status == HCI_REQ_PEND);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
@@ -3585,7 +3533,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
/* Stand-alone HCI commands must be flagged as
 * single-command requests.
 */
bt_cb(skb)->req_start = 1;
bt_cb(skb)->req.start = true;

skb_queue_tail(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -4263,7 +4211,7 @@ static bool hci_req_is_complete(struct hci_dev *hdev)
if (!skb)
	return true;

return bt_cb(skb)->req_start;
return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
@@ -4288,9 +4236,10 @@ static void hci_resend_last(struct hci_dev *hdev)
queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
hci_req_complete_t req_complete = NULL;
struct sk_buff *skb;
unsigned long flags;

@@ -4322,36 +4271,29 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
 * callback would be found in hdev->sent_cmd instead of the
 * command queue (hdev->cmd_q).
 */
if (hdev->sent_cmd) {
req_complete = bt_cb(hdev->sent_cmd)->req_complete;
if (bt_cb(hdev->sent_cmd)->req.complete) {
	*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
	return;
}

if (req_complete) {
/* We must set the complete callback to NULL to
 * avoid calling the callback more than once if
 * this function gets called again.
 */
bt_cb(hdev->sent_cmd)->req_complete = NULL;

goto call_complete;
}
if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
	*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
	return;
}

/* Remove all pending commands belonging to this request */
spin_lock_irqsave(&hdev->cmd_q.lock, flags);
while ((skb = __skb_dequeue(&hdev->cmd_q))) {
if (bt_cb(skb)->req_start) {
if (bt_cb(skb)->req.start) {
	__skb_queue_head(&hdev->cmd_q, skb);
	break;
}

req_complete = bt_cb(skb)->req_complete;
*req_complete = bt_cb(skb)->req.complete;
*req_complete_skb = bt_cb(skb)->req.complete_skb;
kfree_skb(skb);
}
spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
if (req_complete)
	req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
}

static void hci_rx_work(struct work_struct *work)
@@ -114,6 +114,30 @@ static const struct file_operations features_fops = {
.release = single_release,
};

static int device_id_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;

hci_dev_lock(hdev);
seq_printf(f, "%4.4x:%4.4x:%4.4x:%4.4x\n", hdev->devid_source,
	   hdev->devid_vendor, hdev->devid_product, hdev->devid_version);
hci_dev_unlock(hdev);

return 0;
}

static int device_id_open(struct inode *inode, struct file *file)
{
return single_open(file, device_id_show, inode->i_private);
}

static const struct file_operations device_id_fops = {
.open = device_id_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};

static int device_list_show(struct seq_file *f, void *ptr)
{
struct hci_dev *hdev = f->private;
@@ -335,6 +359,8 @@ void hci_debugfs_create_common(struct hci_dev *hdev)
debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
debugfs_create_u8("hardware_error", 0444, hdev->debugfs,
		  &hdev->hw_error_code);
debugfs_create_file("device_id", 0444, hdev->debugfs, hdev,
		    &device_id_fops);

debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
		    &device_list_fops);
@@ -1045,11 +1045,6 @@ static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

hci_dev_lock(hdev);
mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
				  rp->status);
hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
@@ -1058,15 +1053,8 @@ static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

hci_dev_lock(hdev);
mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
				  rp->hash256, rp->rand256,
				  rp->status);
hci_dev_unlock(hdev);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -2732,17 +2720,19 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}

static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
struct hci_ev_cmd_complete *ev = (void *) skb->data;
u8 status = skb->data[sizeof(*ev)];
__u16 opcode;

*opcode = __le16_to_cpu(ev->opcode);
*status = skb->data[sizeof(*ev)];

skb_pull(skb, sizeof(*ev));

opcode = __le16_to_cpu(ev->opcode);

switch (opcode) {
switch (*opcode) {
case HCI_OP_INQUIRY_CANCEL:
hci_cc_inquiry_cancel(hdev, skb);
break;
@@ -3020,32 +3010,36 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
break;

default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break;
}

if (opcode != HCI_OP_NOP)
if (*opcode != HCI_OP_NOP)
	cancel_delayed_work(&hdev->cmd_timer);

hci_req_cmd_complete(hdev, opcode, status);

if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
	atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
		     req_complete_skb);

if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
struct hci_ev_cmd_status *ev = (void *) skb->data;
__u16 opcode;

skb_pull(skb, sizeof(*ev));

opcode = __le16_to_cpu(ev->opcode);
*opcode = __le16_to_cpu(ev->opcode);
*status = ev->status;

switch (opcode) {
switch (*opcode) {
case HCI_OP_INQUIRY:
hci_cs_inquiry(hdev, ev->status);
break;
@@ -3115,22 +3109,29 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
break;

default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break;
}

if (opcode != HCI_OP_NOP)
if (*opcode != HCI_OP_NOP)
	cancel_delayed_work(&hdev->cmd_timer);

if (ev->status ||
    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req_event))
	hci_req_cmd_complete(hdev, opcode, ev->status);

if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
	atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* Indicate request completion if the command failed. Also, if
 * we're not waiting for a special event and we get a success
 * command status we should try to flag the request as completed
 * (since for this kind of commands there will not be a command
 * complete event).
 */
if (ev->status ||
    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
	hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
			     req_complete_skb);

if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -5031,32 +5032,79 @@ static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
amp_read_loc_assoc_final_data(hdev, hcon);
}

static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
struct hci_ev_cmd_complete *ev;
struct hci_event_hdr *hdr;

if (!skb)
	return false;

if (skb->len < sizeof(*hdr)) {
	BT_ERR("Too short HCI event");
	return false;
}

hdr = (void *) skb->data;
skb_pull(skb, HCI_EVENT_HDR_SIZE);

if (event) {
	if (hdr->evt != event)
		return false;
	return true;
}

if (hdr->evt != HCI_EV_CMD_COMPLETE) {
	BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
	return false;
}

if (skb->len < sizeof(*ev)) {
	BT_ERR("Too short cmd_complete event");
	return false;
}

ev = (void *) skb->data;
skb_pull(skb, sizeof(*ev));

if (opcode != __le16_to_cpu(ev->opcode)) {
	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));
	return false;
}

return true;
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
__u8 event = hdr->evt;
hci_req_complete_t req_complete = NULL;
hci_req_complete_skb_t req_complete_skb = NULL;
struct sk_buff *orig_skb = NULL;
u8 status = 0, event = hdr->evt, req_evt = 0;
u16 opcode = HCI_OP_NOP;

hci_dev_lock(hdev);

/* Received events are (currently) only needed when a request is
 * ongoing so avoid unnecessary memory allocation.
 */
if (hci_req_pending(hdev)) {
kfree_skb(hdev->recv_evt);
hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
opcode = __le16_to_cpu(cmd_hdr->opcode);
hci_req_cmd_complete(hdev, opcode, status, &req_complete,
		     &req_complete_skb);
req_evt = event;
}

hci_dev_unlock(hdev);
/* If it looks like we might end up having to call
 * req_complete_skb, store a pristine copy of the skb since the
 * various handlers may modify the original one through
 * skb_pull() calls, etc.
 */
if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
    event == HCI_EV_CMD_COMPLETE)
	orig_skb = skb_clone(skb, GFP_KERNEL);

skb_pull(skb, HCI_EVENT_HDR_SIZE);

if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req_event == event) {
struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

hci_req_cmd_complete(hdev, opcode, 0);
}

switch (event) {
case HCI_EV_INQUIRY_COMPLETE:
hci_inquiry_complete_evt(hdev, skb);
@@ -5099,11 +5147,13 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
break;

case HCI_EV_CMD_COMPLETE:
hci_cmd_complete_evt(hdev, skb);
hci_cmd_complete_evt(hdev, skb, &opcode, &status,
		     &req_complete, &req_complete_skb);
break;

case HCI_EV_CMD_STATUS:
hci_cmd_status_evt(hdev, skb);
hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
		   &req_complete_skb);
break;

case HCI_EV_HARDWARE_ERROR:
@@ -5235,6 +5285,17 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
break;
}

if (req_complete) {
	req_complete(hdev, status, opcode);
} else if (req_complete_skb) {
	if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
		kfree_skb(orig_skb);
		orig_skb = NULL;
	}
	req_complete_skb(hdev, status, opcode, orig_skb);
}

kfree_skb(orig_skb);
kfree_skb(skb);
hdev->stat.evt_rx++;
}
@@ -34,7 +34,8 @@ void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
@@ -55,7 +56,8 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
return -ENODATA;

skb = skb_peek_tail(&req->cmd_q);
bt_cb(skb)->req_complete = complete;
bt_cb(skb)->req.complete = complete;
bt_cb(skb)->req.complete_skb = complete_skb;

spin_lock_irqsave(&hdev->cmd_q.lock, flags);
skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
@@ -66,6 +68,16 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
return req_run(req, NULL, complete);
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
@@ -116,9 +128,9 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
}

if (skb_queue_empty(&req->cmd_q))
bt_cb(skb)->req_start = 1;
bt_cb(skb)->req.start = true;

bt_cb(skb)->req_event = event;
bt_cb(skb)->req.event = event;

skb_queue_tail(&req->cmd_q, skb);
}
@@ -32,11 +32,14 @@ struct hci_request {

void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param);
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event);
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb);

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param);
@@ -1164,7 +1164,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
/* Stand-alone HCI commands must be flagged as
 * single-command requests.
 */
bt_cb(skb)->req_start = 1;
bt_cb(skb)->req.start = true;

skb_queue_tail(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -70,10 +70,11 @@ static void hidp_session_terminate(struct hidp_session *s);

static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
{
u32 valid_flags = 0;
memset(ci, 0, sizeof(*ci));
bacpy(&ci->bdaddr, &session->bdaddr);

ci->flags = session->flags;
ci->flags = session->flags & valid_flags;
ci->state = BT_CONNECTED;

if (session->input) {
@@ -907,7 +908,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
kref_init(&session->ref);
atomic_set(&session->state, HIDP_SESSION_IDLING);
init_waitqueue_head(&session->state_queue);
session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
session->flags = req->flags & BIT(HIDP_BLUETOOTH_VENDOR_ID);

/* connection management */
bacpy(&session->bdaddr, bdaddr);
@@ -1312,6 +1313,7 @@ int hidp_connection_add(struct hidp_connadd_req *req,
			struct socket *ctrl_sock,
			struct socket *intr_sock)
{
u32 valid_flags = 0;
struct hidp_session *session;
struct l2cap_conn *conn;
struct l2cap_chan *chan;
@@ -1321,6 +1323,9 @@ int hidp_connection_add(struct hidp_connadd_req *req,
if (ret)
	return ret;

if (req->flags & ~valid_flags)
	return -EINVAL;

chan = l2cap_pi(ctrl_sock->sk)->chan;
conn = NULL;
l2cap_chan_lock(chan);
@@ -1351,13 +1356,17 @@ int hidp_connection_add(struct hidp_connadd_req *req,

int hidp_connection_del(struct hidp_conndel_req *req)
{
u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG);
struct hidp_session *session;

if (req->flags & ~valid_flags)
	return -EINVAL;

session = hidp_session_find(&req->bdaddr);
if (!session)
	return -ENOENT;

if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG))
if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG))
	hidp_send_ctrl_message(session,
			       HIDP_TRANS_HID_CONTROL |
			       HIDP_CTRL_VIRTUAL_CABLE_UNPLUG,
@@ -292,7 +292,7 @@ static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
struct sk_buff *skb;

skb_queue_walk(head, skb) {
if (bt_cb(skb)->control.txseq == seq)
if (bt_cb(skb)->l2cap.txseq == seq)
	return skb;
}

@@ -954,11 +954,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
{
if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
__unpack_extended_control(get_unaligned_le32(skb->data),
			  &bt_cb(skb)->control);
			  &bt_cb(skb)->l2cap);
skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
} else {
__unpack_enhanced_control(get_unaligned_le16(skb->data),
			  &bt_cb(skb)->control);
			  &bt_cb(skb)->l2cap);
skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
}
}
@@ -1200,8 +1200,8 @@ static void l2cap_move_setup(struct l2cap_chan *chan)

chan->retry_count = 0;
skb_queue_walk(&chan->tx_q, skb) {
if (bt_cb(skb)->control.retries)
	bt_cb(skb)->control.retries = 1;
if (bt_cb(skb)->l2cap.retries)
	bt_cb(skb)->l2cap.retries = 1;
else
	break;
}
@@ -1846,8 +1846,8 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,

skb = skb_dequeue(&chan->tx_q);

bt_cb(skb)->control.retries = 1;
control = &bt_cb(skb)->control;
bt_cb(skb)->l2cap.retries = 1;
control = &bt_cb(skb)->l2cap;

control->reqseq = 0;
control->txseq = chan->next_tx_seq;
@@ -1891,8 +1891,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)

skb = chan->tx_send_head;

bt_cb(skb)->control.retries = 1;
control = &bt_cb(skb)->control;
bt_cb(skb)->l2cap.retries = 1;
control = &bt_cb(skb)->l2cap;

if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
	control->final = 1;
@@ -1963,11 +1963,11 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
continue;
}

bt_cb(skb)->control.retries++;
control = bt_cb(skb)->control;
bt_cb(skb)->l2cap.retries++;
control = bt_cb(skb)->l2cap;

if (chan->max_tx != 0 &&
    bt_cb(skb)->control.retries > chan->max_tx) {
    bt_cb(skb)->l2cap.retries > chan->max_tx) {
BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
l2cap_send_disconn_req(chan, ECONNRESET);
l2cap_seq_list_clear(&chan->retrans_list);
@@ -2045,7 +2045,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,

if (chan->unacked_frames) {
skb_queue_walk(&chan->tx_q, skb) {
if (bt_cb(skb)->control.txseq == control->reqseq ||
if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
    skb == chan->tx_send_head)
	break;
}
@@ -2055,7 +2055,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
break;

l2cap_seq_list_append(&chan->retrans_list,
		      bt_cb(skb)->control.txseq);
		      bt_cb(skb)->l2cap.txseq);
}

l2cap_ertm_resend(chan);
@@ -2267,8 +2267,8 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
return ERR_PTR(err);
}

bt_cb(skb)->control.fcs = chan->fcs;
bt_cb(skb)->control.retries = 0;
bt_cb(skb)->l2cap.fcs = chan->fcs;
bt_cb(skb)->l2cap.retries = 0;
return skb;
}

@@ -2321,7 +2321,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
return PTR_ERR(skb);
}

bt_cb(skb)->control.sar = sar;
bt_cb(skb)->l2cap.sar = sar;
__skb_queue_tail(seg_queue, skb);

len -= pdu_len;
@@ -2856,7 +2856,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
continue;

/* Don't send frame to the channel it came from */
if (bt_cb(skb)->chan == chan)
if (bt_cb(skb)->l2cap.chan == chan)
	continue;

nskb = skb_clone(skb, GFP_KERNEL);
@@ -5918,7 +5918,7 @@ static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)

skb_unlink(skb, &chan->srej_q);
chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
if (err)
	break;
}
@@ -5952,7 +5952,7 @@ static void l2cap_handle_srej(struct l2cap_chan *chan,
return;
}

if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
l2cap_send_disconn_req(chan, ECONNRESET);
return;
@@ -6005,7 +6005,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan,
skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

if (chan->max_tx && skb &&
    bt_cb(skb)->control.retries >= chan->max_tx) {
    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
l2cap_send_disconn_req(chan, ECONNRESET);
return;
@@ -6565,7 +6565,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,

static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
struct l2cap_ctrl *control = &bt_cb(skb)->control;
struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
u16 len;
u8 event;

@@ -6864,8 +6864,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
goto drop;

/* Store remote BD_ADDR and PSM for msg_name */
bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
bt_cb(skb)->psm = psm;
bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
bt_cb(skb)->l2cap.psm = psm;

if (!chan->ops->recv(chan, skb)) {
l2cap_chan_put(chan);
@@ -1330,7 +1330,7 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,

skb->priority = sk->sk_priority;

bt_cb(skb)->chan = chan;
bt_cb(skb)->l2cap.chan = chan;

return skb;
}
@@ -1444,8 +1444,8 @@ static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,

memset(la, 0, sizeof(struct sockaddr_l2));
la->l2_family = AF_BLUETOOTH;
la->l2_psm = bt_cb(skb)->psm;
bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr);
la->l2_psm = bt_cb(skb)->l2cap.psm;
bacpy(&la->l2_bdaddr, &bt_cb(skb)->l2cap.bdaddr);

*msg_namelen = sizeof(struct sockaddr_l2);
}
@@ -985,14 +985,27 @@ static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
/* Instance 0 always manages the "Tx Power" and "Flags" fields */
flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

/* For instance 0, assemble the flags from global settings */
if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE) ||
    get_connectable(hdev))
/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
 * to the "connectable" instance flag.
 */
if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
	flags |= MGMT_ADV_FLAG_CONNECTABLE;

return flags;
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
/* Ignore instance 0 and other unsupported instances */
if (instance != 0x01)
	return 0;

/* TODO: Take into account the "appearance" and "local-name" flags here.
 * These are currently being ignored as they are not supported.
 */
return hdev->adv_instance.scan_rsp_len;
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
u8 ad_len = 0, flags = 0;
@@ -1030,6 +1043,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
}
}

if (instance) {
memcpy(ptr, hdev->adv_instance.adv_data,
       hdev->adv_instance.adv_data_len);

ad_len += hdev->adv_instance.adv_data_len;
ptr += hdev->adv_instance.adv_data_len;
}

/* Provide Tx Power only if we can provide a valid value for it */
if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
@@ -1041,12 +1062,6 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
ptr += 3;
}

if (instance) {
memcpy(ptr, hdev->adv_instance.adv_data,
       hdev->adv_instance.adv_data_len);
ad_len += hdev->adv_instance.adv_data_len;
}

return ad_len;
}

@@ -1242,7 +1257,12 @@ static void enable_advertising(struct hci_request *req)

instance = get_current_adv_instance(hdev);
flags = get_adv_instance_flags(hdev, instance);
connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE);

/* If the "connectable" instance flag was not set, then choose between
 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
 */
connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
	      get_connectable(hdev);

/* Set require_privacy to true only when non-connectable
 * advertising is used. In that case it is fine to use a
@@ -1254,7 +1274,14 @@ static void enable_advertising(struct hci_request *req)
memset(&cp, 0, sizeof(cp));
cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;

if (connectable)
	cp.type = LE_ADV_IND;
else if (get_adv_instance_scan_rsp_len(hdev, instance))
	cp.type = LE_ADV_SCAN_IND;
else
	cp.type = LE_ADV_NONCONN_IND;

cp.own_address_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;

@@ -2088,7 +2115,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,

no_scan_update:
/* Update the advertising parameters if necessary */
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
	enable_advertising(&req);

err = hci_req_run(&req, set_connectable_complete);
@@ -3757,10 +3785,70 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}

static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
struct mgmt_rp_read_local_oob_data mgmt_rp;
size_t rp_size = sizeof(mgmt_rp);
struct mgmt_pending_cmd *cmd;

BT_DBG("%s status %u", hdev->name, status);

cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
if (!cmd)
	return;

if (status || !skb) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
		status ? mgmt_status(status) : MGMT_STATUS_FAILED);
goto remove;
}

memset(&mgmt_rp, 0, sizeof(mgmt_rp));

if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

if (skb->len < sizeof(*rp)) {
	mgmt_cmd_status(cmd->sk, hdev->id,
			MGMT_OP_READ_LOCAL_OOB_DATA,
			MGMT_STATUS_FAILED);
	goto remove;
}

memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
} else {
struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

if (skb->len < sizeof(*rp)) {
	mgmt_cmd_status(cmd->sk, hdev->id,
			MGMT_OP_READ_LOCAL_OOB_DATA,
			MGMT_STATUS_FAILED);
	goto remove;
}

memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
}

mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
		  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
mgmt_pending_remove(cmd);
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
struct mgmt_pending_cmd *cmd;
struct hci_request req;
int err;

BT_DBG("%s", hdev->name);
@@ -3791,12 +3879,14 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}

if (bredr_sc_enabled(hdev))
	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
			   0, NULL);
else
	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
hci_req_init(&req, hdev);

if (bredr_sc_enabled(hdev))
	hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
else
	hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

err = hci_req_run_skb(&req, read_local_oob_data_complete);
if (err < 0)
	mgmt_pending_remove(cmd);

@@ -6388,46 +6478,41 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,

BT_DBG("%s", hdev->name);

if (!hdev_is_powered(hdev))
	return mgmt_cmd_complete(sk, hdev->id,
				 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				 MGMT_STATUS_NOT_POWERED,
				 &cp->type, sizeof(cp->type));

switch (cp->type) {
case BIT(BDADDR_BREDR):
status = mgmt_bredr_support(hdev);
if (status)
	return mgmt_cmd_complete(sk, hdev->id,
				 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				 status, &cp->type,
				 sizeof(cp->type));
eir_len = 5;
break;
case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
status = mgmt_le_support(hdev);
if (status)
	return mgmt_cmd_complete(sk, hdev->id,
				 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				 status, &cp->type,
				 sizeof(cp->type));
eir_len = 9 + 3 + 18 + 18 + 3;
break;
default:
return mgmt_cmd_complete(sk, hdev->id,
			 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
			 MGMT_STATUS_INVALID_PARAMS,
			 &cp->type, sizeof(cp->type));
if (hdev_is_powered(hdev)) {
switch (cp->type) {
case BIT(BDADDR_BREDR):
status = mgmt_bredr_support(hdev);
if (status)
	eir_len = 0;
else
	eir_len = 5;
break;
case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
status = mgmt_le_support(hdev);
if (status)
	eir_len = 0;
else
	eir_len = 9 + 3 + 18 + 18 + 3;
break;
default:
status = MGMT_STATUS_INVALID_PARAMS;
eir_len = 0;
break;
}
} else {
status = MGMT_STATUS_NOT_POWERED;
eir_len = 0;
}

hci_dev_lock(hdev);

rp_len = sizeof(*rp) + eir_len;
rp = kmalloc(rp_len, GFP_ATOMIC);
if (!rp) {
hci_dev_unlock(hdev);
if (!rp)
	return -ENOMEM;
}

if (status)
	goto complete;

hci_dev_lock(hdev);

eir_len = 0;
switch (cp->type) {
@@ -6439,20 +6524,30 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
    smp_generate_oob(hdev, hash, rand) < 0) {
hci_dev_unlock(hdev);
err = mgmt_cmd_complete(sk, hdev->id,
			MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
			MGMT_STATUS_FAILED,
			&cp->type, sizeof(cp->type));
goto done;
status = MGMT_STATUS_FAILED;
goto complete;
}

/* This should return the active RPA, but since the RPA
 * is only programmed on demand, it is really hard to fill
 * this in at the moment. For now disallow retrieving
 * local out-of-band data when privacy is in use.
 *
 * Returning the identity address will not help here since
 * pairing happens before the identity resolving key is
 * known and thus the connection establishment happens
 * based on the RPA and not the identity address.
 */
if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
memcpy(addr, &hdev->rpa, 6);
addr[6] = 0x01;
} else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    bacmp(&hdev->static_addr, BDADDR_ANY))) {
hci_dev_unlock(hdev);
status = MGMT_STATUS_REJECTED;
goto complete;
}

if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
     bacmp(&hdev->static_addr, BDADDR_ANY))) {
memcpy(addr, &hdev->static_addr, 6);
addr[6] = 0x01;
} else {
@@ -6491,16 +6586,19 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
break;
}

rp->type = cp->type;
rp->eir_len = cpu_to_le16(eir_len);

hci_dev_unlock(hdev);

hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

status = MGMT_STATUS_SUCCESS;

complete:
rp->type = cp->type;
rp->eir_len = cpu_to_le16(eir_len);

err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
			MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
if (err < 0)
			status, rp, sizeof(*rp) + eir_len);
if (err < 0 || status)
	goto done;

err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
@@ -7899,43 +7997,6 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
			  cmd ? cmd->sk : NULL);
}

void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
struct mgmt_pending_cmd *cmd;

BT_DBG("%s status %u", hdev->name, status);

cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
if (!cmd)
	return;

if (status) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
		mgmt_status(status));
} else {
struct mgmt_rp_read_local_oob_data rp;
size_t rp_size = sizeof(rp);

memcpy(rp.hash192, hash192, sizeof(rp.hash192));
memcpy(rp.rand192, rand192, sizeof(rp.rand192));

if (bredr_sc_enabled(hdev) && hash256 && rand256) {
memcpy(rp.hash256, hash256, sizeof(rp.hash256));
memcpy(rp.rand256, rand256, sizeof(rp.rand256));
} else {
rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
}

mgmt_cmd_complete(cmd->sk, hdev->id,
		  MGMT_OP_READ_LOCAL_OOB_DATA, 0,
		  &rp, rp_size);
}

mgmt_pending_remove(cmd);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
int i;
@@ -21,6 +21,8 @@
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

@@ -154,6 +156,21 @@ static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32],
return 0;
}

static char test_ecdh_buffer[32];

static ssize_t test_ecdh_read(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
return simple_read_from_buffer(user_buf, count, ppos, test_ecdh_buffer,
			       strlen(test_ecdh_buffer));
}

static const struct file_operations test_ecdh_fops = {
.open = simple_open,
.read = test_ecdh_read,
.llseek = default_llseek,
};

static int __init test_ecdh(void)
{
ktime_t calltime, delta, rettime;
@@ -165,19 +182,19 @@ static int __init test_ecdh(void)
err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1);
if (err) {
BT_ERR("ECDH sample 1 failed");
return err;
goto done;
}

err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2);
if (err) {
BT_ERR("ECDH sample 2 failed");
return err;
goto done;
}

err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3);
if (err) {
BT_ERR("ECDH sample 3 failed");
return err;
goto done;
}

rettime = ktime_get();
@@ -186,7 +203,17 @@ static int __init test_ecdh(void)

BT_INFO("ECDH test passed in %llu usecs", duration);

return 0;
done:
if (!err)
	snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer),
		 "PASS (%llu usecs)\n", duration);
else
	snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "FAIL\n");

debugfs_create_file("selftest_ecdh", 0444, bt_debugfs, NULL,
		    &test_ecdh_fops);

return err;
}

#else
@@ -3017,7 +3017,7 @@ static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan,
return ERR_PTR(-ENOMEM);

skb->priority = HCI_PRIO_MAX;
bt_cb(skb)->chan = chan;
bt_cb(skb)->l2cap.chan = chan;

return skb;
}
@@ -3549,6 +3549,21 @@ static int __init test_h6(struct crypto_hash *tfm_cmac)
return 0;
}

static char test_smp_buffer[32];

static ssize_t test_smp_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
return simple_read_from_buffer(user_buf, count, ppos, test_smp_buffer,
			       strlen(test_smp_buffer));
}

static const struct file_operations test_smp_fops = {
.open = simple_open,
.read = test_smp_read,
.llseek = default_llseek,
};

static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
				struct crypto_hash *tfm_cmac)
{
@@ -3561,49 +3576,49 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
err = test_ah(tfm_aes);
if (err) {
BT_ERR("smp_ah test failed");
return err;
goto done;
}

err = test_c1(tfm_aes);
if (err) {
BT_ERR("smp_c1 test failed");
return err;
goto done;
}

err = test_s1(tfm_aes);
if (err) {
BT_ERR("smp_s1 test failed");
return err;
goto done;
}

err = test_f4(tfm_cmac);
if (err) {
BT_ERR("smp_f4 test failed");
return err;
goto done;
}

err = test_f5(tfm_cmac);
if (err) {
BT_ERR("smp_f5 test failed");
return err;
goto done;
}

err = test_f6(tfm_cmac);
if (err) {
BT_ERR("smp_f6 test failed");
return err;
goto done;
}

err = test_g2(tfm_cmac);
if (err) {
BT_ERR("smp_g2 test failed");
return err;
goto done;
}

err = test_h6(tfm_cmac);
if (err) {
BT_ERR("smp_h6 test failed");
return err;
goto done;
}

rettime = ktime_get();
@@ -3612,7 +3627,17 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,

BT_INFO("SMP test passed in %llu usecs", duration);

return 0;
done:
if (!err)
	snprintf(test_smp_buffer, sizeof(test_smp_buffer),
		 "PASS (%llu usecs)\n", duration);
else
	snprintf(test_smp_buffer, sizeof(test_smp_buffer), "FAIL\n");

debugfs_create_file("selftest_smp", 0444, bt_debugfs, NULL,
		    &test_smp_fops);

return err;
}

int __init bt_selftest_smp(void)